From eef3223f6cee2d1642c42402ab238dd486665671 Mon Sep 17 00:00:00 2001 From: Peter Leschev Date: Thu, 27 Mar 2014 17:11:34 +1100 Subject: [PATCH 001/956] Adding the ability to skip nat port forwarding for ssh connectivity --- builder/virtualbox/common/ssh_config.go | 1 + builder/virtualbox/common/step_export.go | 29 ++++---- builder/virtualbox/common/step_forward_ssh.go | 70 ++++++++++--------- builder/virtualbox/iso/builder.go | 14 ++-- builder/virtualbox/ovf/builder.go | 14 ++-- .../builders/virtualbox-iso.html.markdown | 4 ++ .../builders/virtualbox-ovf.html.markdown | 4 ++ 7 files changed, 80 insertions(+), 56 deletions(-) diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go index 00c6167c6..5da8ad403 100644 --- a/builder/virtualbox/common/ssh_config.go +++ b/builder/virtualbox/common/ssh_config.go @@ -16,6 +16,7 @@ type SSHConfig struct { SSHPort uint `mapstructure:"ssh_port"` SSHUser string `mapstructure:"ssh_username"` RawSSHWaitTimeout string `mapstructure:"ssh_wait_timeout"` + SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping"` SSHWaitTimeout time.Duration } diff --git a/builder/virtualbox/common/step_export.go b/builder/virtualbox/common/step_export.go index e4e860155..0a3cd816c 100644 --- a/builder/virtualbox/common/step_export.go +++ b/builder/virtualbox/common/step_export.go @@ -17,9 +17,10 @@ import ( // Produces: // exportPath string - The path to the resulting export. type StepExport struct { - Format string - OutputDir string - ExportOpts []string + Format string + OutputDir string + ExportOpts []string + SkipNatMapping bool } func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { @@ -33,15 +34,19 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { // Clear out the Packer-created forwarding rule ui.Say("Preparing to export machine...") - ui.Message(fmt.Sprintf( - "Deleting forwarded port mapping for SSH (host port %d)", - state.Get("sshHostPort"))) - command := []string{"modifyvm", vmName, "--natpf1", "delete", "packerssh"} - if err := driver.VBoxManage(command...); err != nil { - err := fmt.Errorf("Error deleting port forwarding rule: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt + var command []string + + if s.SkipNatMapping == false { + ui.Message(fmt.Sprintf( + "Deleting forwarded port mapping for SSH (host port %d)", + state.Get("sshHostPort"))) + command := []string{"modifyvm", vmName, "--natpf1", "delete", "packerssh"} + if err := driver.VBoxManage(command...); err != nil { + err := fmt.Errorf("Error deleting port forwarding rule: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } } // Export the VM to an OVF diff --git a/builder/virtualbox/common/step_forward_ssh.go b/builder/virtualbox/common/step_forward_ssh.go index 862432952..d6d604e00 100644 --- a/builder/virtualbox/common/step_forward_ssh.go +++ b/builder/virtualbox/common/step_forward_ssh.go @@ -19,9 +19,10 @@ import ( // // Produces: type StepForwardSSH struct { - GuestPort uint - HostPortMin uint - HostPortMax uint + GuestPort uint + HostPortMin uint + HostPortMax uint + SkipNatMapping bool } func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { @@ -29,39 +30,44 @@ func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) - log.Printf("Looking for available SSH port between %d and %d", - s.HostPortMin, 
s.HostPortMax) var sshHostPort uint - var offset uint = 0 + if s.SkipNatMapping { + sshHostPort = s.GuestPort + log.Printf("Skipping SSH NAT mapping and using SSH port %d", sshHostPort) + } else { + log.Printf("Looking for available SSH port between %d and %d", + s.HostPortMin, s.HostPortMax) + var offset uint = 0 - portRange := int(s.HostPortMax - s.HostPortMin) - if portRange > 0 { - // Have to check if > 0 to avoid a panic - offset = uint(rand.Intn(portRange)) - } - - for { - sshHostPort = offset + s.HostPortMin - log.Printf("Trying port: %d", sshHostPort) - l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort)) - if err == nil { - defer l.Close() - break + portRange := int(s.HostPortMax - s.HostPortMin) + if portRange > 0 { + // Have to check if > 0 to avoid a panic + offset = uint(rand.Intn(portRange)) } - } - // Create a forwarded port mapping to the VM - ui.Say(fmt.Sprintf("Creating forwarded port mapping for SSH (host port %d)", sshHostPort)) - command := []string{ - "modifyvm", vmName, - "--natpf1", - fmt.Sprintf("packerssh,tcp,127.0.0.1,%d,,%d", sshHostPort, s.GuestPort), - } - if err := driver.VBoxManage(command...); err != nil { - err := fmt.Errorf("Error creating port forwarding rule: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt + for { + sshHostPort = offset + s.HostPortMin + log.Printf("Trying port: %d", sshHostPort) + l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort)) + if err == nil { + defer l.Close() + break + } + } + + // Create a forwarded port mapping to the VM + ui.Say(fmt.Sprintf("Creating forwarded port mapping for SSH (host port %d)", sshHostPort)) + command := []string{ + "modifyvm", vmName, + "--natpf1", + fmt.Sprintf("packerssh,tcp,127.0.0.1,%d,,%d", sshHostPort, s.GuestPort), + } + if err := driver.VBoxManage(command...); err != nil { + err := fmt.Errorf("Error creating port forwarding rule: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } } // Save the port we're using so that future steps can use it diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 31660a961..7ec34bfe8 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -290,9 +290,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(stepAttachGuestAdditions), new(vboxcommon.StepAttachFloppy), &vboxcommon.StepForwardSSH{ - GuestPort: b.config.SSHPort, - HostPortMin: b.config.SSHHostPortMin, - HostPortMax: b.config.SSHHostPortMax, + GuestPort: b.config.SSHPort, + HostPortMin: b.config.SSHHostPortMin, + HostPortMax: b.config.SSHHostPortMax, + SkipNatMapping: b.config.SSHSkipNatMapping, }, &vboxcommon.StepVBoxManage{ Commands: b.config.VBoxManage, @@ -319,9 +320,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(vboxcommon.StepRemoveDevices), &vboxcommon.StepExport{ - Format: b.config.Format, - OutputDir: b.config.OutputDir, - ExportOpts: b.config.ExportOpts.ExportOpts, + Format: b.config.Format, + OutputDir: b.config.OutputDir, + ExportOpts: b.config.ExportOpts.ExportOpts, + SkipNatMapping: b.config.SSHSkipNatMapping, }, } diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index f2e6a92f5..65c4fe72a 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -65,9 +65,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe */ 
new(vboxcommon.StepAttachFloppy),
 		&vboxcommon.StepForwardSSH{
-			GuestPort:   b.config.SSHPort,
-			HostPortMin: b.config.SSHHostPortMin,
-			HostPortMax: b.config.SSHHostPortMax,
+			GuestPort:      b.config.SSHPort,
+			HostPortMin:    b.config.SSHHostPortMin,
+			HostPortMax:    b.config.SSHHostPortMax,
+			SkipNatMapping: b.config.SSHSkipNatMapping,
 		},
 		&vboxcommon.StepVBoxManage{
 			Commands: b.config.VBoxManage,
@@ -95,9 +96,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		},
 		new(vboxcommon.StepRemoveDevices),
 		&vboxcommon.StepExport{
-			Format:     b.config.Format,
-			OutputDir:  b.config.OutputDir,
-			ExportOpts: b.config.ExportOpts.ExportOpts,
+			Format:         b.config.Format,
+			OutputDir:      b.config.OutputDir,
+			ExportOpts:     b.config.ExportOpts.ExportOpts,
+			SkipNatMapping: b.config.SSHSkipNatMapping,
 		},
 	}

diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown
index e6e3b2454..3c2e87e7f 100644
--- a/website/source/docs/builders/virtualbox-iso.html.markdown
+++ b/website/source/docs/builders/virtualbox-iso.html.markdown
@@ -191,6 +191,10 @@ Optional:
   available. By default this is "20m", or 20 minutes. Note that this should be
   quite long since the timer begins as soon as the virtual machine is booted.
 
+* `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does
+  not set up a forwarded port mapping for SSH and instead uses `ssh_port` on
+  the host to communicate with the virtual machine.
+
 * `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
   execute in order to further customize the virtual machine being created. The
   value of this is an array of commands to execute. The commands are executed
diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown
index 6bf6e49d9..43ce70339 100644
--- a/website/source/docs/builders/virtualbox-ovf.html.markdown
+++ b/website/source/docs/builders/virtualbox-ovf.html.markdown
@@ -126,6 +126,10 @@ Optional:
   available. By default this is "20m", or 20 minutes. Note that this should be
   quite long since the timer begins as soon as the virtual machine is booted.
 
+* `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does
+  not set up a forwarded port mapping for SSH and instead uses `ssh_port` on
+  the host to communicate with the virtual machine.
+
 * `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
   execute in order to further customize the virtual machine being created. The
   value of this is an array of commands to execute. The commands are executed

From 88ec874c323fd4b4120c30dcc166f26a9ba4e4dd Mon Sep 17 00:00:00 2001
From: Nathan Hartwell
Date: Fri, 9 May 2014 09:08:41 -0500
Subject: [PATCH 002/956] Adding disable_sudo support to salt-masterless
 provisioner

This is already present in some other provisioners and is helpful when
using a builder that gives you root access.
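
For illustration, a template excerpt exercising the new option might look
like the following sketch (the `local_state_tree` value is a placeholder
for wherever your states live, not something this change ships):

```json
{
  "provisioners": [{
    "type": "salt-masterless",
    "local_state_tree": "/path/to/salt/states",
    "disable_sudo": true
  }]
}
```

With `disable_sudo` set, the bootstrap, the config/state moves, and the
final `salt-call` all run as the connecting user, which is only useful
when that user is already root.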
--- provisioner/salt-masterless/provisioner.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index d13ca1f84..1f65b7ac5 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -19,6 +19,8 @@ type Config struct { SkipBootstrap bool `mapstructure:"skip_bootstrap"` BootstrapArgs string `mapstructure:"bootstrap_args"` + DisableSudo bool `mapstructure:"disable_sudo"` + // Local path to the minion config MinionConfig string `mapstructure:"minion_config"` @@ -108,7 +110,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { ui.Say("Provisioning with Salt...") if !p.config.SkipBootstrap { cmd := &packer.RemoteCmd{ - Command: fmt.Sprintf("wget -O - http://bootstrap.saltstack.org | sudo sh -s %s", p.config.BootstrapArgs), + Command: fmt.Sprintf("wget -O - http://bootstrap.saltstack.org | %s %s", p.sudo("sh -s"), p.config.BootstrapArgs), } ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd)) if err = cmd.StartWithUi(comm, ui); err != nil { @@ -133,7 +135,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } ui.Message(fmt.Sprintf("Moving %s/minion to /etc/salt/minion", p.config.TempConfigDir)) - cmd = &packer.RemoteCmd{Command: fmt.Sprintf("sudo mv %s/minion /etc/salt/minion", p.config.TempConfigDir)} + cmd = &packer.RemoteCmd{Command: p.sudo(fmt.Sprintf("mv %s/minion /etc/salt/minion", p.config.TempConfigDir))} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -150,7 +152,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } ui.Message(fmt.Sprintf("Moving %s/states to /srv/salt", p.config.TempConfigDir)) - cmd = &packer.RemoteCmd{Command: fmt.Sprintf("sudo mv %s/states /srv/salt", p.config.TempConfigDir)} + cmd = &packer.RemoteCmd{Command: p.sudo(fmt.Sprintf("mv %s/states /srv/salt", p.config.TempConfigDir))} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -167,7 +169,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } ui.Message(fmt.Sprintf("Moving %s/pillar to /srv/pillar", p.config.TempConfigDir)) - cmd = &packer.RemoteCmd{Command: fmt.Sprintf("sudo mv %s/pillar /srv/pillar", p.config.TempConfigDir)} + cmd = &packer.RemoteCmd{Command: p.sudo(fmt.Sprintf("mv %s/pillar /srv/pillar", p.config.TempConfigDir))} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -178,7 +180,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } ui.Message("Running highstate") - cmd = &packer.RemoteCmd{Command: "sudo salt-call --local state.highstate -l info"} + cmd = &packer.RemoteCmd{Command: p.sudo("salt-call --local state.highstate -l info")} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -196,6 +198,15 @@ func (p *Provisioner) Cancel() { os.Exit(0) } +// Prepends sudo to supplied command if config says to +func (p *Provisioner) sudo(cmd string) string { + if p.config.DisableSudo { + return cmd + } + + return "sudo " + cmd +} + func uploadMinionConfig(comm packer.Communicator, dst 
string, src string) error { f, err := os.Open(src) if err != nil { From fd3b4ecc25865f6464b583595c32c27edc08a93a Mon Sep 17 00:00:00 2001 From: Andrew Beresford Date: Fri, 7 Nov 2014 10:55:31 +0000 Subject: [PATCH 003/956] Add 1/10th second delay between key events to VNC --- builder/vmware/common/step_type_boot_command.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index 82e8b3e17..f67940280 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -200,7 +200,9 @@ func vncSendString(c *vnc.ClientConn, original string) { } c.KeyEvent(keyCode, true) + time.Sleep(time.Second/10) c.KeyEvent(keyCode, false) + time.Sleep(time.Second/10) if keyShift { c.KeyEvent(KeyLeftShift, false) From 42d05368ae45b98548fb327d760ce9447f316c53 Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sat, 10 Jan 2015 23:52:45 +1100 Subject: [PATCH 004/956] Save the generated SSH key as a file in debug mode --- builder/digitalocean/builder.go | 5 +++- builder/digitalocean/step_create_ssh_key.go | 31 ++++++++++++++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index cf76f6970..055861984 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -244,7 +244,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ - new(stepCreateSSHKey), + &stepCreateSSHKey{ + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("do_%s.pem", b.config.PackerBuildName), + }, new(stepCreateDroplet), new(stepDropletInfo), &common.StepConnectSSH{ diff --git a/builder/digitalocean/step_create_ssh_key.go b/builder/digitalocean/step_create_ssh_key.go index 78bb474c1..ade19d7b3 100644 --- a/builder/digitalocean/step_create_ssh_key.go +++ b/builder/digitalocean/step_create_ssh_key.go @@ -7,6 +7,8 @@ import ( "encoding/pem" "fmt" "log" + "runtime" + "os" "code.google.com/p/gosshold/ssh" "github.com/mitchellh/multistep" @@ -15,7 +17,9 @@ import ( ) type stepCreateSSHKey struct { - keyId uint + Debug bool + DebugKeyPath string + keyId uint } func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { @@ -62,6 +66,31 @@ func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { // Remember some state for the future state.Put("ssh_key_id", keyId) + // If we're in debug mode, output the private key to the working directory. 
+ if s.Debug { + ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) + f, err := os.Create(s.DebugKeyPath) + if err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + defer f.Close() + + // Write the key out + if _, err := f.Write(pem.EncodeToMemory(&priv_blk)); err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + + // Chmod it so that it is SSH ready + if runtime.GOOS != "windows" { + if err := f.Chmod(0600); err != nil { + state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err)) + return multistep.ActionHalt + } + } + } + return multistep.ActionContinue } From a100e9393b9ae963f502c7dcab578cc0dabe4652 Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sun, 11 Jan 2015 10:25:48 +1100 Subject: [PATCH 005/956] Add support for custom working directory for puppet --- provisioner/puppet-masterless/provisioner.go | 14 +++++++++++++- .../provisioners/puppet-masterless.html.markdown | 7 +++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 307ecce38..d96397e41 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -41,6 +41,10 @@ type Config struct { // The directory where files will be uploaded. Packer requires write // permissions in this directory. StagingDir string `mapstructure:"staging_directory"` + + // The directory from which the command will be executed. + // Packer requires the directory to exist when running puppet. + WorkingDir string `mapstructure:"working_directory"` } type Provisioner struct { @@ -48,6 +52,7 @@ type Provisioner struct { } type ExecuteTemplate struct { + WorkingDir string FacterVars string HieraConfigPath string ModulePath string @@ -73,7 +78,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { // Set some defaults if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + + p.config.ExecuteCommand = "cd {{.WorkingDir}} && " + + "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + "puppet apply --verbose --modulepath='{{.ModulePath}}' " + "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + @@ -85,12 +91,17 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-puppet-masterless" } + if p.config.WorkingDir == "" { + p.config.StagingDir = p.config.StagingDir + } + // Templates templates := map[string]*string{ "hiera_config_path": &p.config.HieraConfigPath, "manifest_file": &p.config.ManifestFile, "manifest_dir": &p.config.ManifestDir, "staging_dir": &p.config.StagingDir, + "working_dir": &p.config.WorkingDir, } for n, ptr := range templates { @@ -256,6 +267,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { // Execute Puppet command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteTemplate{ + WorkingDir: p.config.WorkingDir, FacterVars: strings.Join(facterVars, " "), HieraConfigPath: remoteHieraConfigPath, ManifestDir: remoteManifestDir, diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 953dca6ea..4ed566bc6 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown 
+++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -79,12 +79,18 @@ Optional parameters: this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `working_directory` (string) - This is the directory from which the puppet command + will be run. When using hiera with a relative path, this option allows to ensure + that he paths are working properly. If not specified, defaults to the value of + specified `staging_directory` (or its default value if not specified either). + ## Execute Command By default, Packer uses the following command (broken across multiple lines for readability) to execute Puppet: ```liquid +cd {{.WorkingDir}} && \ {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \ --verbose \ --modulepath='{{.ModulePath}}' \ @@ -98,6 +104,7 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: +* `WorkingDir` - The path from which Puppet will be executed. * `FacterVars` - Shell-friendly string of environmental variables used to set custom facts configured for this provisioner. * `HieraConfigPath` - The path to a hiera configuration file. From b7fccec91c930d67b11b18d5cf6b3afece228f3a Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sun, 11 Jan 2015 10:29:01 +1100 Subject: [PATCH 006/956] Set the working dir to staging dir --- provisioner/puppet-masterless/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index d96397e41..6bbfc5c1f 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -92,7 +92,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.WorkingDir == "" { - p.config.StagingDir = p.config.StagingDir + p.config.WorkingDir = p.config.StagingDir } // Templates From 8c87b1cc00618632ef1a80e4d349e2918bf92c8b Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 12:20:31 -0500 Subject: [PATCH 007/956] First attempt for re-using a named EC2 keypair Adds a 'ssh_keypair_name' option to the configuration for AWS, along with some munging to create the temporarily keypair if one isn't specific. NOT YET WORKING. 
From a 'make' I get the following errors: builder/amazon/ebs/builder.go:94: b.config.SSHKeyPairName undefined (type config has no field or method SSHKeyPairName) builder/amazon/instance/builder.go:199: b.config.SSHKeyPairName undefined (type Config has no field or method SSHKeyPairName) --- builder/amazon/common/run_config.go | 7 +++++-- builder/amazon/common/run_config_test.go | 4 ++-- builder/amazon/common/step_key_pair.go | 22 ++++++++++++---------- builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index a71387623..f6e859c03 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -24,6 +24,7 @@ type RunConfig struct { RawSSHTimeout string `mapstructure:"ssh_timeout"` SSHUsername string `mapstructure:"ssh_username"` SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"` + SSHKeyPairName string `mapstructure:"ssh_keypair_name"` SSHPrivateIp bool `mapstructure:"ssh_private_ip"` SSHPort int `mapstructure:"ssh_port"` SecurityGroupId string `mapstructure:"security_group_id"` @@ -55,6 +56,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { "ssh_timeout": &c.RawSSHTimeout, "ssh_username": &c.SSHUsername, "ssh_private_key_file": &c.SSHPrivateKeyFile, + "ssh_keypair_name": &c.SSHKeyPairName, "source_ami": &c.SourceAmi, "subnet_id": &c.SubnetId, "temporary_key_pair_name": &c.TemporaryKeyPairName, @@ -84,8 +86,9 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { c.RawSSHTimeout = "5m" } - if c.TemporaryKeyPairName == "" { - c.TemporaryKeyPairName = fmt.Sprintf( + // if we are not given an explicit keypairname, create a temporary one + if c.SSHKeyPairName == "" { + c.SSHKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go index 8e9c4b6b9..c4e1fa110 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -142,12 +142,12 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) { func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) { c := testConfig() - c.TemporaryKeyPairName = "" + c.SSHKeyPairName = "" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.TemporaryKeyPairName == "" { + if c.SSHKeyPairName == "" { t.Fatal("keypair empty") } } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 3a7eb9f35..db60e1e40 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -21,7 +21,7 @@ type StepKeyPair struct { func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - s.keyName = "" + s.keyName = s.KeyPairName // need to get from config privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -29,7 +29,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - state.Put("keyPair", "") + state.Put("keyPair", s.keyName) state.Put("privateKey", string(privateKeyBytes)) return multistep.ActionContinue @@ -83,17 +83,19 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return - if s.keyName == "" { + // If we used an SSH private key 
file, do not go about deleting + // keypairs + if s.PrivateKeyFile != "" { return } - ec2conn := state.Get("ec2").(*ec2.EC2) + //ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("Deleting temporary keypair...") - _, err := ec2conn.DeleteKeyPair(s.keyName) - if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) - } + ui.Say("DANGER: Deleting temporary keypair (not really)...") + //_, err := ec2conn.DeleteKeyPair(s.keyName) + //if err != nil { + //ui.Error(fmt.Sprintf( + //"Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) + //} } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 889cc7b60..083507993 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -91,7 +91,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 1f5c1d9c8..ce582e039 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -196,7 +196,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ From 9d097f9d4ef331adac5f322d14ca185e879331a0 Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 16:27:33 -0500 Subject: [PATCH 008/956] Permit Temp keys and named SSH keypairs These changes permit the use of pre-created SSH keypairs with AWS. If so, the configuration for the builder needs to include an ssh_keypair_name option and a ssh_private_key_file. If ssh_private_key_file is *not* defined, it'll go through the rigamarole of creating a temporary keypair. The ssh_keypair_name option by itself won't make that change, because it doesn't make sense to specify a keypair but not tell packer where the private key is, but it does happen that you could have a private key and the public-key is "baked in", and not part of your EC2 account. 
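
As a sketch of the intended template usage (the keypair name and private
key path below are placeholders; the remaining fields are just the values
used elsewhere in these docs):

```json
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "us-east-1",
    "source_ami": "ami-c65be9ae",
    "instance_type": "t1.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-example {{timestamp}}",
    "ssh_keypair_name": "my-keypair",
    "ssh_private_key_file": "/home/me/.ssh/my-keypair.pem"
  }]
}
```

Leaving both options out preserves the existing behavior of creating and
later deleting a temporary keypair.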
--- builder/amazon/common/run_config.go | 2 +- builder/amazon/common/run_config_test.go | 4 ++-- builder/amazon/common/step_key_pair.go | 25 +++++++++++++----------- builder/amazon/ebs/builder.go | 9 +++++---- builder/amazon/instance/builder.go | 9 +++++---- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index f6e859c03..67ec74d79 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -88,7 +88,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { // if we are not given an explicit keypairname, create a temporary one if c.SSHKeyPairName == "" { - c.SSHKeyPairName = fmt.Sprintf( + c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go index c4e1fa110..8e9c4b6b9 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -142,12 +142,12 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) { func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) { c := testConfig() - c.SSHKeyPairName = "" + c.TemporaryKeyPairName = "" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHKeyPairName == "" { + if c.TemporaryKeyPairName == "" { t.Fatal("keypair empty") } } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index db60e1e40..f6e6a0555 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -13,6 +13,7 @@ import ( type StepKeyPair struct { Debug bool DebugKeyPath string + TemporaryKeyPairName string KeyPairName string PrivateKeyFile string @@ -21,7 +22,9 @@ type StepKeyPair struct { func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - s.keyName = s.KeyPairName // need to get from config + if s.KeyPairName != "" { + s.keyName = s.KeyPairName // need to get from config + } privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -38,15 +41,15 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName)) - keyResp, err := ec2conn.CreateKeyPair(s.KeyPairName) + ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName)) + keyResp, err := ec2conn.CreateKeyPair(s.TemporaryKeyPairName) if err != nil { state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) return multistep.ActionHalt } // Set the keyname so we know to delete it later - s.keyName = s.KeyPairName + s.keyName = s.TemporaryKeyPairName // Set some state data for use in future steps state.Put("keyPair", s.keyName) @@ -89,13 +92,13 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { return } - //ec2conn := state.Get("ec2").(*ec2.EC2) + ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("DANGER: Deleting temporary keypair (not really)...") - //_, err := ec2conn.DeleteKeyPair(s.keyName) - //if err != nil { - //ui.Error(fmt.Sprintf( - //"Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) - //} + ui.Say("DANGER: Deleting temporary keypair...") + _, err := ec2conn.DeleteKeyPair(s.keyName) + if err != nil { + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) + } } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 083507993..95e7ea016 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -89,10 +89,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + TemporaryKeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index ce582e039..b677f4da9 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -194,10 +194,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + TemporaryKeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, From 62e054c404e7e622203a5aadb9e062c77c663d87 Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 21:40:15 -0500 Subject: [PATCH 009/956] simplify output --- builder/amazon/common/step_key_pair.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index f6e6a0555..a4baf8e0b 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -95,7 +95,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("DANGER: Deleting temporary keypair...") + ui.Say("Deleting temporary keypair...") _, err := ec2conn.DeleteKeyPair(s.keyName) if err != nil { ui.Error(fmt.Sprintf( From 43f08b2664d9736a71152e0bd0df397f994e6afe Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 22:58:41 -0500 Subject: [PATCH 010/956] go fmt all the things --- builder/amazon/common/run_config.go | 2 +- builder/amazon/common/step_key_pair.go | 24 ++++++++++++------------ builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 67ec74d79..d4ebec0e1 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -87,7 +87,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { } // if we are not given an explicit keypairname, create a temporary one - if c.SSHKeyPairName == "" { + if c.SSHKeyPairName == "" { c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", 
uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index a4baf8e0b..3f40e3d77 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -11,20 +11,20 @@ import ( ) type StepKeyPair struct { - Debug bool - DebugKeyPath string - TemporaryKeyPairName string - KeyPairName string - PrivateKeyFile string + Debug bool + DebugKeyPath string + TemporaryKeyPairName string + KeyPairName string + PrivateKeyFile string keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - if s.KeyPairName != "" { - s.keyName = s.KeyPairName // need to get from config - } + if s.KeyPairName != "" { + s.keyName = s.KeyPairName // need to get from config + } privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -86,8 +86,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return - // If we used an SSH private key file, do not go about deleting - // keypairs + // If we used an SSH private key file, do not go about deleting + // keypairs if s.PrivateKeyFile != "" { return } @@ -98,7 +98,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ui.Say("Deleting temporary keypair...") _, err := ec2conn.DeleteKeyPair(s.keyName) if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) } } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 95e7ea016..0c2258ad6 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -91,7 +91,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - TemporaryKeyPairName: b.config.TemporaryKeyPairName, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index b677f4da9..538e9efb7 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -196,7 +196,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - TemporaryKeyPairName: b.config.TemporaryKeyPairName, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, From 729a0c01c7d75a0224370925b34923d61a0aa850 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Sat, 24 Jan 2015 17:25:47 -0600 Subject: [PATCH 011/956] Updates to Into: Build An Image docs - the listed AMI isn't found - t2.micros can only be in a VPC(? 
so said an error), and the docs say we're using a t1.micro anyway - Updates to the vagrant file to get the website to build --- website/Vagrantfile | 2 ++ .../source/intro/getting-started/build-image.html.markdown | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/website/Vagrantfile b/website/Vagrantfile index b8da4b2db..0cdf78d94 100644 --- a/website/Vagrantfile +++ b/website/Vagrantfile @@ -6,6 +6,8 @@ sudo apt-get -y update # RVM/Ruby sudo apt-get -y install curl +sudo apt-get -y install git +gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3 curl -sSL https://get.rvm.io | bash -s stable . ~/.bashrc . ~/.bash_profile diff --git a/website/source/intro/getting-started/build-image.html.markdown b/website/source/intro/getting-started/build-image.html.markdown index 1cda08a7c..b17b5f823 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -54,8 +54,8 @@ briefly. Create a file `example.json` and fill it with the following contents: "access_key": "{{user `aws_access_key`}}", "secret_key": "{{user `aws_secret_key`}}", "region": "us-east-1", - "source_ami": "ami-9eaa1cf6", - "instance_type": "t2.micro", + "source_ami": "ami-c65be9ae", + "instance_type": "t1.micro", "ssh_username": "ubuntu", "ami_name": "packer-example {{timestamp}}" }] From 2184892f8a4a4da56c6cfc14dd039f099a7bc258 Mon Sep 17 00:00:00 2001 From: "Billie H. Cleek" Date: Thu, 12 Feb 2015 20:18:54 -0800 Subject: [PATCH 012/956] do not request a pty Change the default behavior from requesting a PTY when executing a command with the ssh communicator to requesting a PTY only when configured to do so. Update the vmware builders to be fully backward compatible with the new behavior. 
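
The vmware builders keep their existing skip flag, so templates that do
not want a PTY behave the same as before. A sketch, assuming the existing
`SSHSkipRequestPty` field is still exposed as `ssh_skip_request_pty`:

```json
{
  "builders": [{
    "type": "vmware-iso",
    "ssh_username": "packer",
    "ssh_skip_request_pty": true
  }]
}
```

Internally the builders now pass `Pty: !SSHSkipRequestPty` to the
communicator instead of the old `NoPty: SSHSkipRequestPty`.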
--- builder/vmware/iso/builder.go | 2 +- builder/vmware/iso/driver_esx5.go | 1 - builder/vmware/vmx/builder.go | 2 +- common/step_connect_ssh.go | 6 +++--- communicator/ssh/communicator.go | 6 +++--- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 674b308c9..661a7ed00 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -349,7 +349,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe SSHAddress: driver.SSHAddress, SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), SSHWaitTimeout: b.config.SSHWaitTimeout, - NoPty: b.config.SSHSkipRequestPty, + Pty: !b.config.SSHSkipRequestPty, }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 1f9bd7a78..59c6dcd29 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -334,7 +334,6 @@ func (d *ESX5Driver) connect() error { User: d.Username, Auth: auth, }, - NoPty: true, } comm, err := ssh.New(address, sshConfig) diff --git a/builder/vmware/vmx/builder.go b/builder/vmware/vmx/builder.go index 4597e647b..216f45486 100644 --- a/builder/vmware/vmx/builder.go +++ b/builder/vmware/vmx/builder.go @@ -94,7 +94,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe SSHAddress: driver.SSHAddress, SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), SSHWaitTimeout: b.config.SSHWaitTimeout, - NoPty: b.config.SSHSkipRequestPty, + Pty: !b.config.SSHSkipRequestPty, }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, diff --git a/common/step_connect_ssh.go b/common/step_connect_ssh.go index b00d5bfc0..352d59ace 100644 --- a/common/step_connect_ssh.go +++ b/common/step_connect_ssh.go @@ -34,8 +34,8 @@ type StepConnectSSH struct { // SSHWaitTimeout is the total timeout to wait for SSH to become available. SSHWaitTimeout time.Duration - // NoPty, if true, will not request a Pty from the remote end. - NoPty bool + // Pty, if true, will request a Pty from the remote end. + Pty bool comm packer.Communicator } @@ -138,7 +138,7 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru config := &ssh.Config{ Connection: connFunc, SSHConfig: sshConfig, - NoPty: s.NoPty, + Pty: s.Pty, } log.Println("Attempting SSH connection...") diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 07fb1eaa2..dfe178361 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -33,8 +33,8 @@ type Config struct { // case an error occurs. Connection func() (net.Conn, error) - // NoPty, if true, will not request a pty from the remote end. - NoPty bool + // Pty, if true, will request a pty from the remote end. + Pty bool } // Creates a new packer.Communicator implementation over SSH. 
This takes
@@ -65,7 +65,7 @@ func (c *comm) Start(cmd *packer.RemoteCmd) (err error) {
 	session.Stdout = cmd.Stdout
 	session.Stderr = cmd.Stderr
 
-	if !c.config.NoPty {
+	if c.config.Pty {
 		// Request a PTY
 		termModes := ssh.TerminalModes{
 			ssh.ECHO: 0, // do not echo

From 8e694037351464950e2d57478805a611db8cc59f Mon Sep 17 00:00:00 2001
From: Brian Hourigan
Date: Thu, 19 Feb 2015 14:32:27 -0500
Subject: [PATCH 013/956] Prepending -i to sudo so PATH is inherited from
 profile.d scripts related to ec2 tooling

---
 builder/amazon/instance/builder.go                         | 4 ++--
 website/source/docs/builders/amazon-instance.html.markdown | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index 1f5c1d9c8..bf6791700 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -68,7 +68,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 	}
 
 	if b.config.BundleUploadCommand == "" {
-		b.config.BundleUploadCommand = "sudo -n ec2-upload-bundle " +
+		b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " +
 			"-b {{.BucketName}} " +
 			"-m {{.ManifestPath}} " +
 			"-a {{.AccessKey}} " +
@@ -80,7 +80,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 	}
 
 	if b.config.BundleVolCommand == "" {
-		b.config.BundleVolCommand = "sudo -n ec2-bundle-vol " +
+		b.config.BundleVolCommand = "sudo -i -n ec2-bundle-vol " +
 			"-k {{.KeyPath}} " +
 			"-u {{.AccountId}} " +
 			"-c {{.CertPath}} " +
diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown
index 96110aacd..86d790ff9 100644
--- a/website/source/docs/builders/amazon-instance.html.markdown
+++ b/website/source/docs/builders/amazon-instance.html.markdown
@@ -270,7 +270,7 @@ is responsible for executing `ec2-bundle-vol` in order to store an image of
 the root filesystem to use to create the AMI.
 
 ```text
-sudo -n ec2-bundle-vol \
+sudo -i -n ec2-bundle-vol \
 -k {{.KeyPath}} \
 -u {{.AccountId}} \
 -c {{.CertPath}} \
@@ -297,7 +297,7 @@ across multiple lines for convenience of reading. The bundle upload command is
 responsible for taking the bundled volume and uploading it to S3.
 
 ```text
-sudo -n ec2-upload-bundle \
+sudo -i -n ec2-upload-bundle \
 -b {{.BucketName}} \
 -m {{.ManifestPath}} \
 -a {{.AccessKey}} \

From d1445bc6fe331eb20270f771795b621ed6ea9059 Mon Sep 17 00:00:00 2001
From: David Danzilio
Date: Tue, 27 Jan 2015 14:11:08 -0500
Subject: [PATCH 014/956] Make PackerBuildName and PackerBuilderType available
 as Facts during a masterless run similar to the way we do with the Shell
 provisioner.
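
Both facts are merged into whatever the template already passes through
the `facter` option, so an excerpt like this sketch (the custom fact is
purely illustrative) ends up with `packer_build_name` and
`packer_builder_type` defined as well:

```json
{
  "provisioners": [{
    "type": "puppet-masterless",
    "manifest_file": "site.pp",
    "facter": {
      "server_role": "webserver"
    }
  }]
}
```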
--- provisioner/puppet-masterless/provisioner.go | 2 ++ .../provisioners/puppet-masterless.html.markdown | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 307ecce38..c1085e2f4 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -147,6 +147,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { newFacts[k] = v } + newFacts["packer_build_name"] = p.config.PackerBuildName + newFacts["packer_builder_type"] = p.config.PackerBuilderType p.config.Facter = newFacts // Validation diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index bc65ae812..d0d8e1c95 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -106,3 +106,17 @@ can contain various template variables, defined below: * `ModulePath` - The paths to the module directories. * `Sudo` - A boolean of whether to `sudo` the command or not, depending on the value of the `prevent_sudo` configuration. + +## Default Facts + +In addition to being able to specify custom Facter facts using the `facter` +configuration, the provisioner automatically defines certain commonly useful +facts: + +* `packer_build_name` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them in your Hiera hierarchy. + +* `packer_builder_type` is the type of the builder that was used to create the + machine that Puppet is running on. This is useful if you want to run only + certain parts of your Puppet code on systems built with certain builders. 
From 8404f6ce860c6dbba8d9889f4769b5cdfd384635 Mon Sep 17 00:00:00 2001 From: David Danzilio Date: Tue, 24 Feb 2015 22:52:09 -0500 Subject: [PATCH 015/956] Taking a stab at a test for the facter facts --- .../puppet-masterless/provisioner_test.go | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 0d5576b6b..7ed4a59cd 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -133,3 +133,47 @@ func TestProvisionerPrepare_modulePaths(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerPrepare_facterFacts(t *testing.T) { + config := testConfig() + + delete(config, "facter") + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test with malformed fact + config["facter"] = "fact=stringified" + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should be an error") + } + + // Test with a good one + td, err := ioutil.TempDir("", "packer") + if err != nil { + t.Fatalf("error: %s", err) + } + defer os.RemoveAll(td) + + facts := make(map[string]string) + facts["fact_name"] = "fact_value" + config["facter"] = facts + + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure the default facts are present + delete(config, "facter") + p = new(Provisioner) + err = p.Prepare(config) + if p.config.Facter == nil { + t.Fatalf("err: Default facts are not set in the Puppet provisioner!") + } +} From 7e3d172581aff9d5dc967019ef4901b5d5572642 Mon Sep 17 00:00:00 2001 From: David Danzilio Date: Tue, 24 Feb 2015 22:56:37 -0500 Subject: [PATCH 016/956] Fixing spacing on line 162 and 164 of provisioner/puppet-masterless/provisioner_test.go --- provisioner/puppet-masterless/provisioner_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 7ed4a59cd..42ddd9d7a 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -159,9 +159,9 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { } defer os.RemoveAll(td) - facts := make(map[string]string) + facts := make(map[string]string) facts["fact_name"] = "fact_value" - config["facter"] = facts + config["facter"] = facts p = new(Provisioner) err = p.Prepare(config) @@ -169,7 +169,7 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { t.Fatalf("err: %s", err) } - // Make sure the default facts are present + // Make sure the default facts are present delete(config, "facter") p = new(Provisioner) err = p.Prepare(config) From 2a9dc513ab19d3f6cdc4ec0c12ddb82964067d77 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 26 Feb 2015 16:35:31 -0800 Subject: [PATCH 017/956] Fix statement in machine-readable 'ui' type docs. The text previously stated that "ui"-type messages represent messages that would be shown if Packer is *not* running in human-readable mode. This is rather talking about what would happen when Packer *is* using human-readable mode. 
--- website/source/docs/machine-readable/general.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/machine-readable/general.html.markdown b/website/source/docs/machine-readable/general.html.markdown index 05eb2699a..1f08be4d2 100644 --- a/website/source/docs/machine-readable/general.html.markdown +++ b/website/source/docs/machine-readable/general.html.markdown @@ -15,7 +15,7 @@ machine-readable output and are provided by Packer core itself.

Specifies the output and type of output that would've normally - gone to the console if Packer wasn't running in human-readable + gone to the console if Packer were running in human-readable mode.

From 34e34d1f1825c6a1657ddedf4f9304f81df0c52c Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sat, 28 Feb 2015 23:02:49 +1100 Subject: [PATCH 018/956] Fix typo --- .../source/docs/provisioners/puppet-masterless.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 4ed566bc6..bd239ebe0 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -81,7 +81,7 @@ Optional parameters: * `working_directory` (string) - This is the directory from which the puppet command will be run. When using hiera with a relative path, this option allows to ensure - that he paths are working properly. If not specified, defaults to the value of + that the paths are working properly. If not specified, defaults to the value of specified `staging_directory` (or its default value if not specified either). ## Execute Command From 361e859556ace480bf6dcec922ac0a0662b6388e Mon Sep 17 00:00:00 2001 From: Ash Caire Date: Sun, 1 Mar 2015 10:20:37 +1100 Subject: [PATCH 019/956] Add EBS snapshot tags --- builder/amazon/common/step_create_tags.go | 25 +++++++++++++++++-- test/builder_amazon_ebs.bats | 18 +++++++++++++ .../amazon-ebs/ami_snapshot_tags.json | 20 +++++++++++++++ .../docs/builders/amazon-ebs.html.markdown | 3 ++- 4 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 test/fixtures/amazon-ebs/ami_snapshot_tags.json diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index a204ca321..1d8b80ca7 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -19,7 +19,28 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { if len(s.Tags) > 0 { for region, ami := range amis { - ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami)) + ui.Say(fmt.Sprintf("Preparing tags for AMI (%s) and related snapshots", ami)) + + // Declare list of resources to tag + resourceIds := []string{ami} + + // Retrieve image list for given AMI + imageResp, err := ec2conn.Images([]string{ami}, ec2.NewFilter()) + if err != nil { + err := fmt.Errorf("Error retrieving details for AMI (%s): %s", ami, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + image := &imageResp.Images[0] + + // Add only those with a Snapshot ID, i.e. 
not Ephemeral
+		for _, device := range image.BlockDevices {
+			if device.SnapshotId != "" {
+				ui.Say(fmt.Sprintf("Tagging snapshot: %s", device.SnapshotId))
+				resourceIds = append(resourceIds, device.SnapshotId)
+			}
+		}
 
 		var ec2Tags []ec2.Tag
 		for key, value := range s.Tags {
@@ -28,7 +49,7 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
 		}
 
 		regionconn := ec2.New(ec2conn.Auth, aws.Regions[region])
-		_, err := regionconn.CreateTags([]string{ami}, ec2Tags)
+		_, err = regionconn.CreateTags(resourceIds, ec2Tags)
 		if err != nil {
 			err := fmt.Errorf("Error adding tags to AMI (%s): %s", ami, err)
 			state.Put("error", err)
diff --git a/test/builder_amazon_ebs.bats b/test/builder_amazon_ebs.bats
index 7dd17acbd..89f32a4a0 100755
--- a/test/builder_amazon_ebs.bats
+++ b/test/builder_amazon_ebs.bats
@@ -15,6 +15,18 @@ aws_ami_region_copy_count() {
     | wc -l
 }
 
+# This verifies AMI tags are correctly applied to relevant snapshots
+aws_ami_snapshot_tags_count() {
+  filter='Name=tag:packer-id,Values=ami_snapshot_tags'
+  aws ec2 describe-images --region $1 --owners self --output text \
+    --filters "$filter" \
+    --query "Images[*].BlockDeviceMappings[*].Ebs.SnapshotId" \
+    | aws ec2 describe-snapshots --region $1 --owners self --output text \
+    --filters "$filter" \
+    --snapshot-ids \
+    | wc -l
+}
+
 teardown() {
   aws_ami_cleanup 'us-east-1'
   aws_ami_cleanup 'us-west-1'
@@ -34,3 +46,9 @@ teardown() {
   [ "$(aws_ami_region_copy_count 'us-west-1')" -eq "1" ]
   [ "$(aws_ami_region_copy_count 'us-west-2')" -eq "1" ]
 }
+
+@test "amazon-ebs: AMI snapshot tags" {
+  run packer build $FIXTURE_ROOT/ami_snapshot_tags.json
+  [ "$status" -eq 0 ]
+  [ "$(aws_ami_snapshot_tags_count 'us-east-1')" -eq "2" ]
+}
diff --git a/test/fixtures/amazon-ebs/ami_snapshot_tags.json b/test/fixtures/amazon-ebs/ami_snapshot_tags.json
new file mode 100644
index 000000000..278474a32
--- /dev/null
+++ b/test/fixtures/amazon-ebs/ami_snapshot_tags.json
@@ -0,0 +1,20 @@
+{
+  "builders": [{
+    "type": "amazon-ebs",
+    "ami_name": "packer-test {{timestamp}}",
+    "instance_type": "m1.small",
+    "region": "us-east-1",
+    "ssh_username": "ubuntu",
+    "source_ami": "ami-0568456c",
+    "tags": {
+      "packer-test": "true",
+      "packer-id": "ami_snapshot_tags"
+    },
+    "ami_block_device_mappings": [
+      {
+        "device_name": "/dev/sde",
+        "volume_type": "standard"
+      }
+    ]
+  }]
+}
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index 5e2c31e90..946b18eb9 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -146,7 +146,8 @@ each category, the available configuration keys are alphabetized.
 * `subnet_id` (string) - If using VPC, the ID of the subnet, such as
   "subnet-12345def", where Packer will launch the EC2 instance.
 
-* `tags` (object of key/value strings) - Tags applied to the AMI.
+* `tags` (object of key/value strings) - Tags applied to the AMI and
+  relevant snapshots.
 
 * `temporary_key_pair_name` (string) - The name of the temporary keypair
   to generate. By default, Packer generates a name with a UUID.
From f9c14aee90d9df48e9c4e7ce5c221ce4af3747ff Mon Sep 17 00:00:00 2001
From: Ryan Uber
Date: Mon, 2 Mar 2015 11:59:18 -0800
Subject: [PATCH 020/956] post-processor/atlas: fix index out of range panic
 when artifacts are present

---
 post-processor/atlas/post-processor.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go
index f66eab9ab..504388d81 100644
--- a/post-processor/atlas/post-processor.go
+++ b/post-processor/atlas/post-processor.go
@@ -178,7 +178,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac

 	// Modify the archive options to only include the files
 	// that are in our file list.
-	include := make([]string, 0, len(fs))
+	include := make([]string, len(fs))
 	for i, f := range fs {
 		include[i] = strings.Replace(f, path, "", 1)
 	}
From 97c56347a147f6122e158f623945385b560e1a0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20Pineau?=
Date: Wed, 4 Mar 2015 19:57:03 +0100
Subject: [PATCH 021/956] Better error reporting when a config key in a
 template is unknown
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch makes bugs like the following much faster to diagnose:

```
1 error(s) occurred:

* Unknown configuration key: output_directory
```

Related configuration:

```
"output_directory ": "build/sl_base/",
```

After the patch, the error reporting will be (note the trailing space in
the key, made visible by the quoting):

```
1 error(s) occurred:

* Unknown configuration key: "output_directory "
```
---
 common/config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/config.go b/common/config.go
index 4a56cc356..72b3bdd27 100644
--- a/common/config.go
+++ b/common/config.go
@@ -33,7 +33,7 @@ func CheckUnusedConfig(md *mapstructure.Metadata) *packer.MultiError {
 		for _, unused := range md.Unused {
 			if unused != "type" && !strings.HasPrefix(unused, "packer_") {
 				errs = append(
-					errs, fmt.Errorf("Unknown configuration key: %s", unused))
+					errs, fmt.Errorf("Unknown configuration key: %q", unused))
 			}
 		}
 	}
From a81c8905fb0292d8499a529b0e93b4c168a87fea Mon Sep 17 00:00:00 2001
From: Andrew Beresford
Date: Fri, 7 Nov 2014 10:55:31 +0000
Subject: [PATCH 022/956] Add 1/10th second delay between key events to VNC

---
 builder/vmware/common/step_type_boot_command.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go
index 82e8b3e17..f67940280 100644
--- a/builder/vmware/common/step_type_boot_command.go
+++ b/builder/vmware/common/step_type_boot_command.go
@@ -200,7 +200,9 @@ func vncSendString(c *vnc.ClientConn, original string) {
 		}

 		c.KeyEvent(keyCode, true)
+		time.Sleep(time.Second / 10)
 		c.KeyEvent(keyCode, false)
+		time.Sleep(time.Second / 10)

 		if keyShift {
 			c.KeyEvent(KeyLeftShift, false)
From 698f913bbfefbfd12420348aab4cd1cec39a5869 Mon Sep 17 00:00:00 2001
From: Kevin Fishner
Date: Tue, 3 Mar 2015 17:18:22 -0800
Subject: [PATCH 023/956] explain how packer works with atlas

---
 .../getting-started/build-image.html.markdown |  9 ++-
 .../intro/getting-started/next.html.markdown  |  3 +
 .../remote-builds.html.markdown               | 75 +++++++++++++++++++
 .../getting-started/vagrant.html.markdown     |  4 +-
 website/source/layouts/intro.erb              |  1 +
 5 files changed, 87 insertions(+), 5 deletions(-)
 create mode 100644 website/source/intro/getting-started/remote-builds.html.markdown

diff --git a/website/source/intro/getting-started/build-image.html.markdown
b/website/source/intro/getting-started/build-image.html.markdown index 1cda08a7c..989e4be29 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -157,10 +157,13 @@ the Packer output. Packer only builds images. It does not attempt to manage them in any way. After they're built, it is up to you to launch or destroy them as you see -fit. As a result of this, after running the above example, your AWS account -now has an AMI associated with it. +fit. If you want to store and namespace images for easy reference, you +can use [Atlas by HashiCorp](https://atlas.hashicorp.com). We'll cover +remotely building and storing images at the end of this getting started guide. -AMIs are stored in S3 by Amazon, so unless you want to be charged about $0.01 +After running the above example, your AWS account +now has an AMI associated with it. AMIs are stored in S3 by Amazon, +so unless you want to be charged about $0.01 per month, you'll probably want to remove it. Remove the AMI by first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). Next, delete the associated snapshot on the diff --git a/website/source/intro/getting-started/next.html.markdown b/website/source/intro/getting-started/next.html.markdown index 11f5ea4f7..62480823a 100644 --- a/website/source/intro/getting-started/next.html.markdown +++ b/website/source/intro/getting-started/next.html.markdown @@ -16,6 +16,9 @@ From this point forward, the most important reference for you will be the [documentation](/docs). The documentation is less of a guide and more of a reference of all the overall features and options of Packer. +If you're interested in learning more about how Packer fits into the +HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/getting-started/getting-started-overview). + As you use Packer more, please voice your comments and concerns on the [mailing list or IRC](/community). Additionally, Packer is [open source](https://github.com/mitchellh/packer) so please contribute diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown new file mode 100644 index 000000000..dd7afa911 --- /dev/null +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "intro" +page_title: "Remote Builds and Storage" +prev_url: "/intro/getting-started/vagrant.html" +next_url: "/intro/getting-started/next.html" +next_title: "Next Steps" +description: |- + Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use Atlas by HashiCorp to both run Packer builds remotely and store the output of builds. +--- + +# Remote Builds and Storage +Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds remotely and store the output of builds. + +## Why Build Remotely? +By building remotely, you can move access credentials off of developer machines, release local machines from long-running Packer processes, and automatically start Packer builds from trigger sources such as `vagrant push`, a version control system, or CI tool. 
+

## Run Packer Builds Remotely
To run Packer remotely, there are two changes that must be made to the Packer template. The first is the addition of the `push` [configuration](https://www.packer.io/docs/templates/push.html), which sends the Packer template to Atlas so it can run Packer remotely. The second modification is updating the variables section to read variables from the Atlas environment rather than the local environment. Remove the `post-processors` section for now if it is still in your template.

```javascript
{
  "variables": {
    "aws_access_key": "{{env `aws_access_key`}}",
    "aws_secret_key": "{{env `aws_secret_key`}}"
  },
  "builders": [{
    "type": "amazon-ebs",
    "access_key": "{{user `aws_access_key`}}",
    "secret_key": "{{user `aws_secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-9eaa1cf6",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-example {{timestamp}}"
  }],
  "provisioners": [{
    "type": "shell",
    "inline": [
      "sleep 30",
      "sudo apt-get update",
      "sudo apt-get install -y redis-server"
    ]
  }],
  "push": {
    "name": "ATLAS_USERNAME/packer-tutorial"
  }
}
```

To get an Atlas username, [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). Replace "ATLAS_USERNAME" with your username, then run `packer push -create example.json` to send the configuration to Atlas, which automatically starts the build.

This build will fail since neither `aws_access_key` nor `aws_secret_key` is set in the Atlas environment. To set environment variables in Atlas, navigate to the [operations tab](https://atlas.hashicorp.com/operations), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` with their respective values. Now restart the Packer build by either clicking 'rebuild' in the Atlas UI or by running `packer push example.json` again. Now when you click on the active build, you can view the logs in real-time.

-> **Note:** Whenever a change is made to the Packer template, you must `packer push` to update the configuration in Atlas.

## Store Packer Outputs
Now we have Atlas building an AMI with Redis pre-configured. This is great, but it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple:

```javascript
{
  "variables": ["..."],
  "builders": ["..."],
  "provisioners": ["..."],
  "push": ["..."],
  "post-processors": [
    {
      "type": "atlas",
      "artifact": "ATLAS_USERNAME/packer-tutorial",
      "artifact_type": "aws.ami"
    }
  ]
}
```

Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas.
\ No newline at end of file diff --git a/website/source/intro/getting-started/vagrant.html.markdown b/website/source/intro/getting-started/vagrant.html.markdown index 7353c6f0b..4d6e20caf 100644 --- a/website/source/intro/getting-started/vagrant.html.markdown +++ b/website/source/intro/getting-started/vagrant.html.markdown @@ -2,8 +2,8 @@ layout: "intro" page_title: "Vagrant Boxes" prev_url: "/intro/getting-started/parallel-builds.html" -next_url: "/intro/getting-started/next.html" -next_title: "Next Steps" +next_url: "/intro/getting-started/remote-builds.html" +next_title: "Remote Builds and Storage" description: |- Packer also has the ability to take the results of a builder (such as an AMI or plain VMware image) and turn it into a Vagrant box. --- diff --git a/website/source/layouts/intro.erb b/website/source/layouts/intro.erb index 8b1e6f18c..17e900baf 100644 --- a/website/source/layouts/intro.erb +++ b/website/source/layouts/intro.erb @@ -17,6 +17,7 @@
         <li><a href="/intro/getting-started/provision.html">Provision</a></li>
         <li><a href="/intro/getting-started/parallel-builds.html">Parallel Builds</a></li>
         <li><a href="/intro/getting-started/vagrant.html">Vagrant Boxes</a></li>
+        <li><a href="/intro/getting-started/remote-builds.html">Remote Builds</a></li>
         <li><a href="/intro/getting-started/next.html">Next Steps</a></li>
       <% end %>
From d046e1cc5b8e1cf00cf8f1d59fc09258abfcca7e Mon Sep 17 00:00:00 2001
From: renat-sabitov-sirca
Date: Tue, 16 Dec 2014 12:18:47 +1100
Subject: [PATCH 024/956] Fixing transient AWS errors during EBS builds

Relates to #1539

AWS is eventually consistent and an instance may not be visible for
some time after creation. This fix eliminates the describe-instances
call before going to the proper wait loop.

---
 builder/amazon/common/state.go                |  6 +++---
 .../amazon/common/step_run_source_instance.go | 16 ++++------------
 builder/amazon/ebs/step_create_ami.go         |  2 +-
 builder/amazon/ebs/step_stop_instance.go      |  2 +-
 4 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go
index 62e861d74..a3abd6ee9 100644
--- a/builder/amazon/common/state.go
+++ b/builder/amazon/common/state.go
@@ -63,9 +63,9 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {

 // InstanceStateRefreshFunc returns a StateRefreshFunc that is used to watch
 // an EC2 instance.
-func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
+func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		resp, err := conn.Instances([]string{i.InstanceId}, ec2.NewFilter())
+		resp, err := conn.Instances([]string{instanceId}, ec2.NewFilter())
 		if err != nil {
 			if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
 				// Set this to nil as if we didn't find anything.
@@ -85,7 +85,7 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
 			return nil, "", nil
 		}

-		i = &resp.Reservations[0].Instances[0]
+		i := &resp.Reservations[0].Instances[0]
 		return i, i.State.Name, nil
 	}
 }
diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go
index 50cedf6ea..22e9da847 100644
--- a/builder/amazon/common/step_run_source_instance.go
+++ b/builder/amazon/common/step_run_source_instance.go
@@ -195,21 +195,13 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		instanceId = spotResp.SpotRequestResults[0].InstanceId
 	}

-	instanceResp, err := ec2conn.Instances([]string{instanceId}, nil)
-	if err != nil {
-		err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-	s.instance = &instanceResp.Reservations[0].Instances[0]
-	ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId))
+	ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))

-	ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId))
+	ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
 	stateChange := StateChangeConf{
 		Pending:   []string{"pending"},
 		Target:    "running",
-		Refresh:   InstanceStateRefreshFunc(ec2conn, s.instance),
+		Refresh:   InstanceStateRefreshFunc(ec2conn, instanceId),
 		StepState: state,
 	}
 	latestInstance, err := WaitForState(&stateChange)
@@ -285,7 +277,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
 	}
 	stateChange := StateChangeConf{
 		Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
-		Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
+		Refresh: InstanceStateRefreshFunc(ec2conn, s.instance.InstanceId),
 		Target:  "terminated",
 	}

diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go
index f380ea0b1..66390b08e
100644 --- a/builder/amazon/ebs/step_create_ami.go +++ b/builder/amazon/ebs/step_create_ami.go @@ -87,7 +87,7 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) { ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err)) return } else if resp.Return == false { - ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return)) + ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around")) return } } diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go index 09c19bddb..ebd0a27f3 100644 --- a/builder/amazon/ebs/step_stop_instance.go +++ b/builder/amazon/ebs/step_stop_instance.go @@ -37,7 +37,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { stateChange := awscommon.StateChangeConf{ Pending: []string{"running", "stopping"}, Target: "stopped", - Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, instance), + Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, instance.InstanceId), StepState: state, } _, err = awscommon.WaitForState(&stateChange) From 63597af8bbe636fddb4c171e7433e1a1981034c6 Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Wed, 11 Mar 2015 01:20:30 -0400 Subject: [PATCH 025/956] Clarify the plugin discovery search & priority orders. --- config.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/config.go b/config.go index 23bb4b6ef..4acb3c3b1 100644 --- a/config.go +++ b/config.go @@ -49,11 +49,13 @@ func decodeConfig(r io.Reader, c *config) error { // Discover discovers plugins. // -// This looks in the directory of the executable and the CWD, in that -// order for priority. +// Search the directory of the executable, then the plugins directory, and +// finally the CWD, in that order. Any conflicts will overwrite previously +// found plugins, in that order. +// Hence, the priority order is the reverse of the search order - i.e., the +// CWD has the highest priority. func (c *config) Discover() error { - // Next, look in the same directory as the executable. Any conflicts - // will overwrite those found in our current directory. + // First, look in the same directory as the executable. exePath, err := osext.Executable() if err != nil { log.Printf("[ERR] Error loading exe directory: %s", err) @@ -63,7 +65,7 @@ func (c *config) Discover() error { } } - // Look in the plugins directory + // Next, look in the plugins directory. dir, err := ConfigDir() if err != nil { log.Printf("[ERR] Error loading config directory: %s", err) @@ -73,7 +75,7 @@ func (c *config) Discover() error { } } - // Look in the cwd. + // Last, look in the CWD. 
if err := c.discover("."); err != nil { return err } From d174ffe1fabb3e26875c97865bf5490ffff1abfc Mon Sep 17 00:00:00 2001 From: Mojo Talantikite Date: Thu, 12 Mar 2015 19:01:51 -0400 Subject: [PATCH 026/956] Fix digitalocean provider for private images [fixes mitchellh/packer#1792] --- builder/digitalocean/api_v2.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/builder/digitalocean/api_v2.go b/builder/digitalocean/api_v2.go index b52aeaaaf..46454a9f8 100644 --- a/builder/digitalocean/api_v2.go +++ b/builder/digitalocean/api_v2.go @@ -138,8 +138,13 @@ func (d DigitalOceanClientV2) CreateDroplet(name string, size string, image stri return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err) } + if found_image.Slug == "" { + req.Image = strconv.Itoa(int(found_image.Id)) + } else { + req.Image = found_image.Slug + } + req.Size = found_size.Slug - req.Image = found_image.Slug req.Region = found_region.Slug req.SSHKeys = []string{fmt.Sprintf("%v", keyId)} req.PrivateNetworking = privateNetworking From 90e48eabc38fc77b83c187bbc864e69ec74621fc Mon Sep 17 00:00:00 2001 From: dragon788 Date: Fri, 13 Mar 2015 13:44:07 -0500 Subject: [PATCH 027/956] Invalid boot command section for Parallels ISO --- website/source/docs/builders/parallels-iso.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index 3d13acae3..04e0e5367 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -257,9 +257,9 @@ The available variables are: Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: -```javascript +```text [ - "<esc><esc><enter><wait>", + "", "/install/vmlinuz noapic ", "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ", "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ", From a565f43ca9a81c5f93d45603ba8f6fbbf657d05c Mon Sep 17 00:00:00 2001 From: dragon788 Date: Fri, 13 Mar 2015 13:44:59 -0500 Subject: [PATCH 028/956] Replaced missed command --- website/source/docs/builders/parallels-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index 04e0e5367..e0b1083be 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -267,7 +267,7 @@ an Ubuntu 12.04 installer: "fb=false debconf/frontend=noninteractive ", "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ", "keyboard-configuration/variant=USA console-setup/ask_detect=false ", - "initrd=/install/initrd.gz -- <enter>" + "initrd=/install/initrd.gz -- ;" ] ``` From 0e8cd451ed471383ad359cf50a31fdfac93eb419 Mon Sep 17 00:00:00 2001 From: dragon788 Date: Fri, 13 Mar 2015 13:45:30 -0500 Subject: [PATCH 029/956] Invalid boot command section for VirtualboxISO --- website/source/docs/builders/virtualbox-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 51b575221..020222beb 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -283,7 +283,7 @@ 
The available variables are: Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: -``` +```text [ "", "/install/vmlinuz noapic ", From 7daf8f6b5fc130e136241ab55bb30c37fdecc199 Mon Sep 17 00:00:00 2001 From: dragon788 Date: Fri, 13 Mar 2015 13:45:43 -0500 Subject: [PATCH 030/956] Invalid boot command section for VMware ISO --- website/source/docs/builders/vmware-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index aba8dd902..b1d026e44 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -311,7 +311,7 @@ The available variables are: Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer: -``` +```text [ "", "/install/vmlinuz noapic ", From 9b2d219cabd7de04d28be3aa5e1a0b61020da2e1 Mon Sep 17 00:00:00 2001 From: Marc O'Morain Date: Wed, 18 Mar 2015 16:40:05 +0000 Subject: [PATCH 031/956] MAC address can be upper or lower case --- builder/vmware/common/guest_ip.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/vmware/common/guest_ip.go b/builder/vmware/common/guest_ip.go index ad345d435..25ca3b795 100644 --- a/builder/vmware/common/guest_ip.go +++ b/builder/vmware/common/guest_ip.go @@ -2,6 +2,7 @@ package common import ( "errors" + "fmt" "io/ioutil" "log" "os" @@ -75,14 +76,14 @@ func (f *DHCPLeaseGuestLookup) GuestIP() (string, error) { // If the mac address matches and this lease ends farther in the // future than the last match we might have, then choose it. matches = macLineRe.FindStringSubmatch(line) - if matches != nil && matches[1] == f.MACAddress && curLeaseEnd.Before(lastLeaseEnd) { + if matches != nil && strings.EqualFold(matches[1], f.MACAddress) && curLeaseEnd.Before(lastLeaseEnd) { curIp = lastIp curLeaseEnd = lastLeaseEnd } } if curIp == "" { - return "", errors.New("IP not found for MAC in DHCP leases") + return "", fmt.Errorf("IP not found for MAC %s in DHCP leases at %s", f.MACAddress, dhcpLeasesPath) } return curIp, nil From 9b318941fcb9f40973524c8ef7b282078a781abe Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 19 Mar 2015 10:43:20 -0400 Subject: [PATCH 032/956] Update middleman-hashicorp --- website/Gemfile.lock | 100 ++++++++++++++++++++++--------------------- 1 file changed, 52 insertions(+), 48 deletions(-) diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 170034efc..6b8e19a04 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,63 +1,67 @@ GIT remote: git://github.com/hashicorp/middleman-hashicorp.git - revision: b82c2c2fdc244cd0bd529ff27cfab24e43f07708 + revision: 783fe9517dd02badb85e5ddfeda4d8e35bbd05a8 specs: middleman-hashicorp (0.1.0) - bootstrap-sass (~> 3.2) + bootstrap-sass (~> 3.3) builder (~> 3.2) less (~> 2.6) middleman (~> 3.3) - middleman-livereload (~> 3.3) + middleman-livereload (~> 3.4) middleman-minify-html (~> 3.4) middleman-syntax (~> 2.0) - rack-contrib (~> 1.1) + rack-contrib (~> 1.2) rack-rewrite (~> 1.5) rack-ssl-enforcer (~> 0.2) - redcarpet (~> 3.1) + redcarpet (~> 3.2) therubyracer (~> 0.12) thin (~> 1.6) GEM remote: https://rubygems.org/ specs: - activesupport (4.1.6) + activesupport (4.1.9) i18n (~> 0.6, >= 0.6.9) json (~> 1.7, >= 1.7.7) minitest (~> 5.1) thread_safe (~> 0.1) tzinfo (~> 1.1) - bootstrap-sass (3.2.0.2) - sass (~> 3.2) + autoprefixer-rails (5.1.7.1) 
+ execjs + json + bootstrap-sass (3.3.4.1) + autoprefixer-rails (>= 5.0.0.1) + sass (>= 3.2.19) builder (3.2.2) celluloid (0.16.0) timers (~> 4.0.0) - chunky_png (1.3.3) + chunky_png (1.3.4) coffee-script (2.3.0) coffee-script-source execjs - coffee-script-source (1.8.0) + coffee-script-source (1.9.1) commonjs (0.2.7) - compass (1.0.1) + compass (1.0.3) chunky_png (~> 1.2) - compass-core (~> 1.0.1) + compass-core (~> 1.0.2) compass-import-once (~> 1.0.5) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) sass (>= 3.3.13, < 3.5) - compass-core (1.0.1) + compass-core (1.0.3) multi_json (~> 1.0) sass (>= 3.3.0, < 3.5) compass-import-once (1.0.5) sass (>= 3.2, < 3.5) - daemons (1.1.9) + daemons (1.2.2) em-websocket (0.5.1) eventmachine (>= 0.12.9) http_parser.rb (~> 0.6.0) erubis (2.7.0) - eventmachine (1.0.3) - execjs (2.2.2) - ffi (1.9.6) - haml (4.0.5) + eventmachine (1.0.7) + execjs (2.4.0) + ffi (1.9.8) + haml (4.0.6) tilt hike (1.2.3) hitimes (1.2.2) @@ -65,86 +69,86 @@ GEM uber (~> 0.0.4) htmlcompressor (0.1.2) http_parser.rb (0.6.0) - i18n (0.6.11) - json (1.8.1) - kramdown (1.5.0) + i18n (0.7.0) + json (1.8.2) + kramdown (1.6.0) less (2.6.0) commonjs (~> 0.2.7) libv8 (3.16.14.7) - listen (2.7.11) + listen (2.9.0) celluloid (>= 0.15.2) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.6) + middleman (3.3.10) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.6) + middleman-core (= 3.3.10) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.6) + middleman-core (3.3.10) activesupport (~> 4.1.0) bundler (~> 1.1) erubis hooks (~> 0.3) - i18n (~> 0.6.9) + i18n (~> 0.7.0) listen (>= 2.7.9, < 3.0) padrino-helpers (~> 0.12.3) rack (>= 1.4.5, < 2.0) rack-test (~> 0.6.2) thor (>= 0.15.2, < 2.0) tilt (~> 1.4.1, < 2.0) - middleman-livereload (3.3.4) + middleman-livereload (3.4.2) em-websocket (~> 0.5.1) - middleman-core (~> 3.2) + middleman-core (>= 3.3) rack-livereload (~> 0.3.15) middleman-minify-html (3.4.0) htmlcompressor (~> 0.1.0) middleman-core (>= 3.2) - middleman-sprockets (3.3.10) - middleman-core (~> 3.3) + middleman-sprockets (3.4.2) + middleman-core (>= 3.3) sprockets (~> 2.12.1) sprockets-helpers (~> 1.1.0) - sprockets-sass (~> 1.2.0) + sprockets-sass (~> 1.3.0) middleman-syntax (2.0.0) middleman-core (~> 3.2) rouge (~> 1.0) - minitest (5.4.2) - multi_json (1.10.1) - padrino-helpers (0.12.4) + minitest (5.5.1) + multi_json (1.11.0) + padrino-helpers (0.12.5) i18n (~> 0.6, >= 0.6.7) - padrino-support (= 0.12.4) + padrino-support (= 0.12.5) tilt (~> 1.4.1) - padrino-support (0.12.4) + padrino-support (0.12.5) activesupport (>= 3.1) - rack (1.5.2) - rack-contrib (1.1.0) + rack (1.6.0) + rack-contrib (1.2.0) rack (>= 0.9.1) rack-livereload (0.3.15) rack - rack-rewrite (1.5.0) + rack-rewrite (1.5.1) rack-ssl-enforcer (0.2.8) - rack-test (0.6.2) + rack-test (0.6.3) rack (>= 1.0) rb-fsevent (0.9.4) rb-inotify (0.9.5) ffi (>= 0.5.0) - redcarpet (3.2.0) + redcarpet (3.2.2) ref (1.0.5) - rouge (1.7.2) - sass (3.4.6) - sprockets (2.12.2) + rouge (1.8.0) + sass (3.4.13) + sprockets (2.12.3) hike (~> 1.2) multi_json (~> 1.0) rack (~> 1.0) tilt (~> 1.1, != 1.3.0) sprockets-helpers (1.1.0) sprockets (~> 2.0) - sprockets-sass (1.2.0) + sprockets-sass (1.3.1) sprockets (~> 2.0) tilt (~> 1.1) therubyracer (0.12.1) @@ -155,14 +159,14 @@ GEM eventmachine (~> 1.0) rack (~> 1.0) thor (0.19.1) - thread_safe (0.3.4) + thread_safe (0.3.5) tilt (1.4.1) timers 
(4.0.1) hitimes tzinfo (1.2.2) thread_safe (~> 0.1) - uber (0.0.10) - uglifier (2.5.3) + uber (0.0.13) + uglifier (2.7.1) execjs (>= 0.3.0) json (>= 1.8.0) From e3dc34433ea43ec64a2895960388d912a30035fb Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 19 Mar 2015 10:43:25 -0400 Subject: [PATCH 033/956] Address some mobile issues --- website/source/assets/images/favicon.ico | Bin 0 -> 1150 bytes website/source/layouts/layout.erb | 10 ++++++++++ 2 files changed, 10 insertions(+) create mode 100644 website/source/assets/images/favicon.ico diff --git a/website/source/assets/images/favicon.ico b/website/source/assets/images/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..fa5bd4589da30dcf6b9f6aa183214815c85bf1f1 GIT binary patch literal 1150 zcmc(fOHaZ;5XaZVn-}82vqwLgAIfOLQwcW{qk=}|sk8+}T3ArDfK(ugkyjP?J1q~v z2vIz^?d)V{=QrKmnJr@r^jIv6`c<~R$k+;FY>k9WWa-;UF-CLCOMNlN)L};Jbh>CZ zbQtQZ^ypI7pljvo3^XJJdq1&rf5G*#RF(Ss`#2qTC4~<0Re&etU^py#Tc= zp;)~_x7RV_O>{$^)@*Q5?ilfYaR;)x5Ba_??YGwfI<}V?d_Z)A7mkt2`%IaKPZy2` zycd7s)qm#mzyH3U&hK;ozS;Xv+042TvQ$4*Lt&X}Fzy+hzoH&Oe{X!7zoa4}C~qJ_ zF@gy@{4qNMv2DaMyJr5Al>gFGa-wX;2U@v^bWT9}RzM`-K|HfZ>pnrgBBA<{LsPGj i`&iA4!^VtpKj>|dpY|-H-mak2)xUU64u}7NdG!a1l&&xU literal 0 HcmV?d00001 diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 898de87c1..192b7303d 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -6,6 +6,16 @@ <%= stylesheet_link_tag "application" %> + + + + + + + + " type="image/x-icon"> + " type="image/x-icon"> + From df7623d9d8c1530f0083c755690d6e81f53c2e5b Mon Sep 17 00:00:00 2001 From: Donald Guy Date: Fri, 20 Mar 2015 12:04:38 -0400 Subject: [PATCH 034/956] builder/docker: Run scripts /w `exec` if -v > 1.4 --- builder/docker/communicator.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index bad2c1ff6..c6e3829f0 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -9,12 +9,14 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "strconv" "sync" "syscall" "time" "github.com/ActiveState/tail" + "github.com/hashicorp/go-version" "github.com/mitchellh/packer/packer" ) @@ -26,6 +28,27 @@ type Communicator struct { lock sync.Mutex } +var dockerVersion version.Version +var useDockerExec bool + +func init() { + execConstraint, _ := version.NewConstraint(">= 1.4.0") + + versionExtractor := regexp.MustCompile(version.VersionRegexpRaw) + dockerVersionOutput, _ := exec.Command("docker", "-v").Output() + dockerVersionString := string(versionExtractor.FindSubmatch(dockerVersionOutput)[0]) + + dockerVersion, err := version.NewVersion(dockerVersionString) + if err != nil { + log.Printf("Docker returned malformed version string: %e", err) + log.Printf("Assuming no `exec` capability, using `attatch`") + useDockerExec = false + } else { + log.Printf("Docker version detected as %s", dockerVersion) + useDockerExec = execConstraint.Check(dockerVersion) + } +} + func (c *Communicator) Start(remote *packer.RemoteCmd) error { // Create a temporary file to store the output. Because of a bug in // Docker, sometimes all the output doesn't properly show up. This @@ -41,7 +64,13 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { // This file will store the exit code of the command once it is complete. 
exitCodePath := outputFile.Name() + "-exit" - cmd := exec.Command("docker", "attach", c.ContainerId) + var cmd *exec.Cmd + if useDockerExec { + cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") + } else { + cmd = exec.Command("docker", "attach", c.ContainerId) + } + stdin_w, err := cmd.StdinPipe() if err != nil { // We have to do some cleanup since run was never called From a7206aebd79738993ad1105faf6bc6be6161c90f Mon Sep 17 00:00:00 2001 From: Donald Guy Date: Fri, 20 Mar 2015 12:50:03 -0400 Subject: [PATCH 035/956] builder/docker: attempt to satisfy travis for #1993 --- builder/docker/communicator.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index c6e3829f0..bd1c87240 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -28,19 +28,23 @@ type Communicator struct { lock sync.Mutex } -var dockerVersion version.Version +var dockerVersion *version.Version var useDockerExec bool func init() { execConstraint, _ := version.NewConstraint(">= 1.4.0") versionExtractor := regexp.MustCompile(version.VersionRegexpRaw) - dockerVersionOutput, _ := exec.Command("docker", "-v").Output() - dockerVersionString := string(versionExtractor.FindSubmatch(dockerVersionOutput)[0]) + dockerVersionOutput, err := exec.Command("docker", "-v").Output() + extractedVersion := versionExtractor.FindSubmatch(dockerVersionOutput) - dockerVersion, err := version.NewVersion(dockerVersionString) - if err != nil { - log.Printf("Docker returned malformed version string: %e", err) + if extractedVersion != nil { + dockerVersionString := string(extractedVersion[0]) + dockerVersion, err = version.NewVersion(dockerVersionString) + } + + if dockerVersion == nil { + log.Printf("Could not determine docker version: %v", err) log.Printf("Assuming no `exec` capability, using `attatch`") useDockerExec = false } else { From 23ac351c85082e7098f8d523ba90288265c04283 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Tue, 24 Mar 2015 15:41:17 -0400 Subject: [PATCH 036/956] Clarify doubly-nested array use in post-processors Expands on the description of the Vagrant and Vagrant Cloud post-processors needing to be in a doubly-nested array. Also, separate the two arrays in the JSON sample so they stick out more (and reformat the JSON). --- .../vagrant-cloud.html.markdown | 41 ++++++++++--------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 843e5c511..374e8c73a 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -78,31 +78,34 @@ the box will not be uploaded to the Vagrant Cloud. ## Use with Vagrant Post-Processor You'll need to use the Vagrant post-processor before using this post-processor. -An example configuration is below. Note the use of the array specifying -the execution order. +An example configuration is below. Note the use of a doubly-nested array, which +ensures that the Vagrant Cloud post-processor is run after the Vagrant +post-processor. ```javascript { - "variables": { - "version": "", - "cloud_token": "" - }, - "builders": [{ + "variables": { + "version": "", + "cloud_token": "" + }, + "builders": [{ // ... 
- }], - "post-processors": [ - [{ - "type": "vagrant", - "include": ["image.iso"], - "vagrantfile_template": "vagrantfile.tpl", - "output": "proxycore_{{.Provider}}.box" + }], + "post-processors": [ + [ + { + "type": "vagrant", + "include": ["image.iso"], + "vagrantfile_template": "vagrantfile.tpl", + "output": "proxycore_{{.Provider}}.box" }, { - "type": "vagrant-cloud", - "box_tag": "hashicorp/precise64", - "access_token": "{{user `cloud_token`}}", - "version": "{{user `version`}}" - }] + "type": "vagrant-cloud", + "box_tag": "hashicorp/precise64", + "access_token": "{{user `cloud_token`}}", + "version": "{{user `version`}}" + } ] + ] } ``` From 8c0169b1c4f60476d90287078bc9f32759c7b59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Georg=20Gro=C3=9Fberger?= Date: Wed, 25 Mar 2015 14:45:15 +0100 Subject: [PATCH 037/956] Make the Vmware build extract the host IP properly from ifconfig stdout --- builder/vmware/common/host_ip_ifconfig.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/common/host_ip_ifconfig.go b/builder/vmware/common/host_ip_ifconfig.go index 2a985aada..10f527c99 100644 --- a/builder/vmware/common/host_ip_ifconfig.go +++ b/builder/vmware/common/host_ip_ifconfig.go @@ -44,7 +44,7 @@ func (f *IfconfigIPFinder) HostIP() (string, error) { return "", err } - re := regexp.MustCompile(`inet\s*(?:addr:)?(.+?)\s`) + re := regexp.MustCompile(`inet[^\d]+([\d\.]+)\s`) matches := re.FindStringSubmatch(stdout.String()) if matches == nil { return "", errors.New("IP not found in ifconfig output...") From 6686b623475caf81be9c06a38dca8e4db6e53955 Mon Sep 17 00:00:00 2001 From: Nevins Bartolomeo Date: Fri, 3 Apr 2015 20:52:54 -0400 Subject: [PATCH 038/956] AWS availability zone could be incorrect when using spot instances with no AZ specified --- builder/amazon/common/step_run_source_instance.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 50cedf6ea..f114e156b 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -75,6 +75,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } spotPrice := s.SpotPrice + availabilityZone := s.AvailabilityZone if spotPrice == "auto" { ui.Message(fmt.Sprintf( "Finding spot price for %s %s...", @@ -96,6 +97,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } var price float64 + for _, history := range resp.History { log.Printf("[INFO] Candidate spot price: %s", history.SpotPrice) current, err := strconv.ParseFloat(history.SpotPrice, 64) @@ -105,6 +107,9 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } if price == 0 || current < price { price = current + if s.AvailabilityZone == "" { + availabilityZone = history.AvailabilityZone + } } } if price == 0 { @@ -158,7 +163,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi SubnetId: s.SubnetId, AssociatePublicIpAddress: s.AssociatePublicIpAddress, BlockDevices: s.BlockDevices.BuildLaunchDevices(), - AvailZone: s.AvailabilityZone, + AvailZone: availabilityZone, } runSpotResp, err := ec2conn.RequestSpotInstances(runOpts) if err != nil { From 43f86180da02df68969f8da9d99a7b602ed57734 Mon Sep 17 00:00:00 2001 From: Alex Shadrin Date: Sat, 4 Apr 2015 13:51:59 +0300 Subject: [PATCH 039/956] zsh completion --- contrib/zsh-completion/_packer | 64 
++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 contrib/zsh-completion/_packer diff --git a/contrib/zsh-completion/_packer b/contrib/zsh-completion/_packer new file mode 100644 index 000000000..8e36eedaa --- /dev/null +++ b/contrib/zsh-completion/_packer @@ -0,0 +1,64 @@ +#compdef packer + +local -a _packer_cmds +_packer_cmds=( + 'build:Build image(s) from template' + 'fix:Fixes templates from old versions of packer' + 'inspect:See components of a template' + 'push:Push template files to a Packer build service' + 'validate:Check that a template is valid' + 'version:Prints the Packer version' +) + +__build() { + _arguments \ + '-debug[Debug mode enabled for builds]' \ + '-force[Force a build to continue if artifacts exist, deletes existing artifacts]' \ + '-machine-readable[Machine-readable output]' \ + '-except=[(foo,bar,baz) Build all builds other than these]' \ + '-only=[(foo,bar,baz) Only build the given builds by name]' \ + '-parallel=[(false) Disable parallelization (on by default)]' \ + '-var[("key=value") Variable for templates, can be used multiple times.]' \ + '-var-file=[(path) JSON file containing user variables.]' +} + + +__inspect() { + _arguments \ + '-machine-readable[Machine-readable output]' +} + +__push() { + _arguments \ + '-create[Create the build configuration if it does not exist].' \ + '-token=[() Access token to use to upload.]' +} + +__validate() { + _arguments \ + '-syntax-only[Only check syntax. Do not verify config of the template.]' \ + '-except=[(foo,bar,baz) Validate all builds other than these]' \ + '-only=[(foo,bar,baz) Validate only these builds]' \ + '-var[("key=value") Variable for templates, can be used multiple times.]' \ + '-var-file=[(path) JSON file containing user variables.]' +} + + +_arguments '*:: :->command' + +if (( CURRENT == 1 )); then + _describe -t commands "packer command" _packer_cmds + return +fi + +local -a _command_args +case "$words[1]" in + build) + __build ;; + inspect) + __inspect ;; + push) + __push ;; + validate) + __validate ;; +esac From a4cfd921f40463b7e241b91e646e39aaa787f509 Mon Sep 17 00:00:00 2001 From: Brian Hourigan Date: Sat, 4 Apr 2015 10:52:17 -0400 Subject: [PATCH 040/956] Adding missing hyphen to sudo options --- builder/amazon/instance/builder.go | 2 +- website/source/docs/builders/amazon-instance.html.markdown | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index bf6791700..54324af46 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -80,7 +80,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.BundleVolCommand == "" { - b.config.BundleVolCommand = "sudo i -n ec2-bundle-vol " + + b.config.BundleVolCommand = "sudo -i -n ec2-bundle-vol " + "-k {{.KeyPath}} " + "-u {{.AccountId}} " + "-c {{.CertPath}} " + diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 86d790ff9..f12d168bd 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -270,7 +270,7 @@ is responsible for executing `ec2-bundle-vol` in order to store and image of the root filesystem to use to create the AMI. 
```text -sudo i -n ec2-bundle-vol \ +sudo -i -n ec2-bundle-vol \ -k {{.KeyPath}} \ -u {{.AccountId}} \ -c {{.CertPath}} \ @@ -297,7 +297,7 @@ across multiple lines for convenience of reading. The bundle upload command is responsible for taking the bundled volume and uploading it to S3. ```text -sudo i -n ec2-upload-bundle \ +sudo -i -n ec2-upload-bundle \ -b {{.BucketName}} \ -m {{.ManifestPath}} \ -a {{.AccessKey}} \ From a973dce7d35bf6af6fb8e294c3b1ac37e42ea70e Mon Sep 17 00:00:00 2001 From: Matt Fellows Date: Tue, 7 Apr 2015 12:11:34 +1000 Subject: [PATCH 041/956] Report error code during Temporary Security Group creation (#2021) --- builder/amazon/common/step_security_group.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go index 1d95619fe..79e18d3b0 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -44,6 +44,7 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { groupResp, err := ec2conn.CreateSecurityGroup(group) if err != nil { ui.Error(err.Error()) + state.Put("error", err) return multistep.ActionHalt } From bd425db546a1637901980f76b3c13c5f513ff7d9 Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Tue, 7 Apr 2015 20:33:58 +0200 Subject: [PATCH 042/956] packer: skip colored ui test if ui doesn't support colors Fixes test failure on Windows. --- packer/ui_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packer/ui_test.go b/packer/ui_test.go index 08dd530a5..a42cd3033 100644 --- a/packer/ui_test.go +++ b/packer/ui_test.go @@ -35,6 +35,10 @@ func TestColoredUi(t *testing.T) { bufferUi := testUi() ui := &ColoredUi{UiColorYellow, UiColorRed, bufferUi} + if !ui.supportsColors() { + t.Skip("skipping for ui without color support") + } + ui.Say("foo") result := readWriter(bufferUi) if result != "\033[1;33mfoo\033[0m\n" { From 5c06af872dfef440926f8941328ee5e6b5ce7a0f Mon Sep 17 00:00:00 2001 From: Ameir Abdeldayem Date: Thu, 9 Apr 2015 02:15:16 -0400 Subject: [PATCH 043/956] Support chef-client 'client_key' and default to /client.pem. 
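The end-to-end effect is easiest to see rendered. The sketch below is a minimal, self-contained approximation of this patch: it uses plain `text/template` rather than Packer's own template wrapper, the staging directory shown is assumed to be the provisioner's default, and the Chef server URL is made up; the two rendered lines come from the provisioner's `client.rb` template.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// As when the user omits "client_key" from their template.
	clientKey := ""
	stagingDir := "/tmp/packer-chef-client" // assumed default staging_directory

	// The defaulting rule this patch introduces.
	if clientKey == "" {
		clientKey = stagingDir + "/client.pem"
	}

	// Two lines taken from the provisioner's client.rb template.
	tpl := template.Must(template.New("client.rb").Parse(
		"chef_server_url \"{{.ServerUrl}}\"\nclient_key      \"{{.ClientKey}}\"\n"))

	_ = tpl.Execute(os.Stdout, map[string]string{
		"ServerUrl": "https://chef.example.com/", // hypothetical server
		"ClientKey": clientKey,
	})
	// Prints:
	//   chef_server_url "https://chef.example.com/"
	//   client_key      "/tmp/packer-chef-client/client.pem"
}
```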
--- provisioner/chef-client/provisioner.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index b3d91b3e4..a4eac96fc 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -35,6 +35,7 @@ type Config struct { SkipCleanNode bool `mapstructure:"skip_clean_node"` SkipInstall bool `mapstructure:"skip_install"` StagingDir string `mapstructure:"staging_directory"` + ClientKey string `mapstructure:"client_key"` ValidationKeyPath string `mapstructure:"validation_key_path"` ValidationClientName string `mapstructure:"validation_client_name"` @@ -48,6 +49,7 @@ type Provisioner struct { type ConfigTemplate struct { NodeName string ServerUrl string + ClientKey string ValidationKeyPath string ValidationClientName string ChefEnvironment string @@ -88,6 +90,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { "chef_server_url": &p.config.ServerUrl, "execute_command": &p.config.ExecuteCommand, "install_command": &p.config.InstallCommand, + "client_key": &p.config.ClientKey, "validation_key_path": &p.config.ValidationKeyPath, "validation_client_name": &p.config.ValidationClientName, } @@ -209,6 +212,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error creating staging directory: %s", err) } + if p.config.ClientKey == "" { + p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir) + } + if p.config.ValidationKeyPath != "" { remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir) if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil { @@ -217,7 +224,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } configPath, err := p.createConfig( - ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) + ui, comm, nodeName, serverUrl, p.config.ClientKey, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) if err != nil { return fmt.Errorf("Error creating Chef config file: %s", err) } @@ -271,7 +278,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds return comm.UploadDir(dst, src, nil) } -func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { +func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { ui.Message("Creating configuration file 'client.rb'") // Read the template @@ -294,6 +301,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{ NodeName: nodeName, ServerUrl: serverUrl, + ClientKey: clientKey, ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, @@ -566,6 +574,7 @@ var DefaultConfigTemplate = ` log_level :info log_location STDOUT chef_server_url "{{.ServerUrl}}" +client_key "{{.ClientKey}}" {{if ne .ValidationClientName ""}} validation_client_name "{{.ValidationClientName}}" {{else}} From 
4735ab004a32facd41ee8bff17e27ea8b2e74ac9 Mon Sep 17 00:00:00 2001 From: Ameir Abdeldayem Date: Thu, 9 Apr 2015 02:19:52 -0400 Subject: [PATCH 044/956] Add docs for `client_key` option of `chef-client` provisioner. --- website/source/docs/provisioners/chef-client.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index a2e2f6f5a..eaeadbf45 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -88,6 +88,9 @@ configuration is actually required. this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `client_key` (string) - Path to client key. If not set, this defaults to a file + named client.pem in `staging_directory`. + * `validation_client_name` (string) - Name of the validation client. If not set, this won't be set in the configuration and the default that Chef uses will be used. From 59499426b05ff652dcb19cd4b898e51c8003df03 Mon Sep 17 00:00:00 2001 From: Spencer Owen Date: Thu, 9 Apr 2015 14:43:12 -0600 Subject: [PATCH 045/956] Changes date example in isotime The example of using 1506 doesn't make much sense since 15 is the hour and 06 is the year. Using HHMM is a more intuitive example --- .../source/docs/templates/configuration-templates.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index b7236ae7e..f5ae4a362 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -116,7 +116,7 @@ Formatting for the function `isotime` uses the magic reference date isotime = June 7, 7:22:43pm 2014 {{isotime "2006-01-02"}} = 2014-06-07 -{{isotime "Mon 1506"}} = Sat 1914 +{{isotime "Mon 1504"}} = Sat 1922 {{isotime "01-Jan-06 03\_04\_05"}} = 07-Jun-2014 07\_22\_43 {{isotime "Hour15Year200603"}} = Hour19Year201407 ``` From 956b9ded0a673ef04589e6c0eda8eefb10571bf4 Mon Sep 17 00:00:00 2001 From: Vilmos Nebehaj Date: Fri, 10 Apr 2015 14:57:32 -0700 Subject: [PATCH 046/956] Try another ssh port if the current one is taken. 
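Combined with the random starting offset that was already there, the patched loop behaves as in this sketch. The function wrapper and parameter names are illustrative (the real code works on builder config fields); the probe, wrap-around, and retry logic matches the hunks below.

```go
package example

import (
	"fmt"
	"net"
)

// findFreeSSHPort probes ports starting at min+offset, wrapping back to
// min once it walks past max, until something in the range binds.
func findFreeSSHPort(offset, min, max uint) uint {
	var port uint
	for {
		port = offset + min
		if port >= max {
			// Walked off the top of the range: wrap around and keep
			// probing from the minimum.
			offset = 0
			port = min
		}
		l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
		if err == nil {
			// The real step defers this Close so the port stays held
			// until the step finishes.
			l.Close()
			break
		}
		offset++
	}
	return port
}
```

Note that the loop only exits once some port in the range binds; if every port is occupied it keeps cycling, which is also how the patched builders behave.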
--- builder/qemu/step_forward_ssh.go | 5 +++++ builder/virtualbox/common/step_forward_ssh.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/builder/qemu/step_forward_ssh.go b/builder/qemu/step_forward_ssh.go index 3b84d26c1..ebf54b093 100644 --- a/builder/qemu/step_forward_ssh.go +++ b/builder/qemu/step_forward_ssh.go @@ -34,12 +34,17 @@ func (s *stepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { for { sshHostPort = offset + config.SSHHostPortMin + if sshHostPort >= config.SSHHostPortMax { + offset = 0 + sshHostPort = config.SSHHostPortMin + } log.Printf("Trying port: %d", sshHostPort) l, err := net.Listen("tcp", fmt.Sprintf(":%d", sshHostPort)) if err == nil { defer l.Close() break } + offset++ } ui.Say(fmt.Sprintf("Found port for SSH: %d.", sshHostPort)) diff --git a/builder/virtualbox/common/step_forward_ssh.go b/builder/virtualbox/common/step_forward_ssh.go index 862432952..4772f8d37 100644 --- a/builder/virtualbox/common/step_forward_ssh.go +++ b/builder/virtualbox/common/step_forward_ssh.go @@ -42,12 +42,17 @@ func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { for { sshHostPort = offset + s.HostPortMin + if sshHostPort >= s.HostPortMax { + offset = 0 + sshHostPort = s.HostPortMin + } log.Printf("Trying port: %d", sshHostPort) l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort)) if err == nil { defer l.Close() break } + offset++ } // Create a forwarded port mapping to the VM From e99cd56b6ccb6eaebfe8ac85e0e1dd68b6d49c2e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Sun, 5 Apr 2015 17:58:48 -0400 Subject: [PATCH 047/956] Migrate from mitchellh/goamz to awslabs/aws-sdk-go This commit moves the Amazon builders of Packer away from the Hashicorp fork of the goamz library to the official AWS SDK for Go, in order that third party plugins may depend on the more complete official library more easily. 
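Most of the mechanical churn in this diff follows one pattern, sketched below with an illustrative helper. The call shapes are the ones used throughout the diff; the output type name is inferred from the SDK's Input/Output naming convention rather than shown in the hunks.

```go
package example

import (
	"github.com/awslabs/aws-sdk-go/service/ec2"
)

// describeInstance shows the recurring translation. goamz took plain
// strings and positional arguments; aws-sdk-go wants a typed request
// struct whose fields are pointers, hence the &instanceId plumbing.
func describeInstance(ec2conn *ec2.EC2, instanceId string) (*ec2.DescribeInstancesOutput, error) {
	// Before (goamz):
	//   resp, err := ec2conn.Instances([]string{instanceId}, ec2.NewFilter())
	return ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIDs: []*string{&instanceId},
	})
}
```

Reads change the same way: struct fields come back as pointers, so comparisons like `image.RootDeviceType != "ebs"` become `*image.RootDeviceType != "ebs"`, as in step_check_root_device.go below.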
--- builder/amazon/chroot/builder.go | 11 +- builder/amazon/chroot/step_attach_volume.go | 21 ++- .../amazon/chroot/step_check_root_device.go | 4 +- builder/amazon/chroot/step_create_volume.go | 31 ++-- builder/amazon/chroot/step_instance_info.go | 13 +- builder/amazon/chroot/step_mount_device.go | 11 +- builder/amazon/chroot/step_register_ami.go | 35 ++-- .../amazon/chroot/step_register_ami_test.go | 44 ++--- builder/amazon/chroot/step_snapshot.go | 19 +- builder/amazon/common/access_config.go | 77 +++++--- builder/amazon/common/ami_config.go | 3 +- builder/amazon/common/artifact.go | 17 +- builder/amazon/common/block_device.go | 41 ++-- builder/amazon/common/block_device_test.go | 27 +-- builder/amazon/common/regions.go | 16 ++ builder/amazon/common/ssh.go | 25 +-- builder/amazon/common/state.go | 37 ++-- builder/amazon/common/step_ami_region_copy.go | 38 ++-- builder/amazon/common/step_create_tags.go | 19 +- builder/amazon/common/step_key_pair.go | 15 +- .../common/step_modify_ami_attributes.go | 45 +++-- .../amazon/common/step_run_source_instance.go | 175 ++++++++++-------- builder/amazon/common/step_security_group.go | 37 ++-- builder/amazon/common/step_source_ami_info.go | 8 +- builder/amazon/ebs/builder.go | 11 +- builder/amazon/ebs/builder_test.go | 3 +- builder/amazon/ebs/step_create_ami.go | 27 ++- builder/amazon/ebs/step_modify_instance.go | 13 +- builder/amazon/ebs/step_stop_instance.go | 7 +- builder/amazon/instance/builder.go | 11 +- builder/amazon/instance/step_bundle_volume.go | 4 +- builder/amazon/instance/step_register_ami.go | 21 ++- builder/amazon/instance/step_upload_bundle.go | 2 +- 33 files changed, 508 insertions(+), 360 deletions(-) create mode 100644 builder/amazon/common/regions.go diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 832a26022..58dbc71fa 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -10,7 +10,7 @@ import ( "log" "runtime" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" @@ -153,17 +153,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, errors.New("The amazon-chroot builder only works on Linux environments.") } - region, err := b.config.Region() + config, err := b.config.Config() if err != nil { return nil, err } - auth, err := b.config.AccessConfig.Auth() - if err != nil { - return nil, err - } - - ec2conn := ec2.New(auth, region) + ec2conn := ec2.New(config) wrappedCommand := func(command string) (string, error) { return b.config.tpl.Process( diff --git a/builder/amazon/chroot/step_attach_volume.go b/builder/amazon/chroot/step_attach_volume.go index e67479550..2263defd0 100644 --- a/builder/amazon/chroot/step_attach_volume.go +++ b/builder/amazon/chroot/step_attach_volume.go @@ -3,11 +3,12 @@ package chroot import ( "errors" "fmt" - "github.com/mitchellh/goamz/ec2" + "strings" + + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" - "strings" ) // StepAttachVolume attaches the previously created volume to an @@ -32,7 +33,11 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { attachVolume := strings.Replace(device, "/xvd", "/sd", 1) ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume)) - _, err 
:= ec2conn.AttachVolume(volumeId, instance.InstanceId, attachVolume) + _, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{ + InstanceID: instance.InstanceID, + VolumeID: &volumeId, + Device: &attachVolume, + }) if err != nil { err := fmt.Errorf("Error attaching volume: %s", err) state.Put("error", err) @@ -50,7 +55,7 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "attached", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.Volumes([]string{volumeId}, ec2.NewFilter()) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&volumeId}}) if err != nil { return nil, "", err } @@ -60,7 +65,7 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { } a := resp.Volumes[0].Attachments[0] - return a, a.Status, nil + return a, *a.State, nil }, } @@ -92,7 +97,7 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error { ui := state.Get("ui").(packer.Ui) ui.Say("Detaching EBS volume...") - _, err := ec2conn.DetachVolume(s.volumeId) + _, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeID: &s.volumeId}) if err != nil { return fmt.Errorf("Error detaching EBS volume: %s", err) } @@ -105,14 +110,14 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error { StepState: state, Target: "detached", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.Volumes([]string{s.volumeId}, ec2.NewFilter()) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&s.volumeId}}) if err != nil { return nil, "", err } v := resp.Volumes[0] if len(v.Attachments) > 0 { - return v, v.Attachments[0].Status, nil + return v, *v.Attachments[0].State, nil } else { return v, "detached", nil } diff --git a/builder/amazon/chroot/step_check_root_device.go b/builder/amazon/chroot/step_check_root_device.go index da18599aa..49a83178a 100644 --- a/builder/amazon/chroot/step_check_root_device.go +++ b/builder/amazon/chroot/step_check_root_device.go @@ -3,7 +3,7 @@ package chroot import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -18,7 +18,7 @@ func (s *StepCheckRootDevice) Run(state multistep.StateBag) multistep.StepAction ui.Say("Checking the root device on source AMI...") // It must be EBS-backed otherwise the build won't work - if image.RootDeviceType != "ebs" { + if *image.RootDeviceType != "ebs" { err := fmt.Errorf("The root device of the source AMI must be EBS-backed.") state.Put("error", err) ui.Error(err.Error()) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 881857e71..d1d12d65b 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -2,11 +2,12 @@ package chroot import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "log" + + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" - "log" ) // StepCreateVolume creates a new volume from the snapshot of the root @@ -25,11 +26,11 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) // Determine the root device snapshot - log.Printf("Searching for root device of the image (%s)", image.RootDeviceName) + log.Printf("Searching for root device of the image 
(%s)", *image.RootDeviceName) var rootDevice *ec2.BlockDeviceMapping - for _, device := range image.BlockDevices { + for _, device := range image.BlockDeviceMappings { if device.DeviceName == image.RootDeviceName { - rootDevice = &device + rootDevice = device break } } @@ -42,12 +43,12 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } ui.Say("Creating the root volume...") - createVolume := &ec2.CreateVolume{ - AvailZone: instance.AvailZone, - Size: rootDevice.VolumeSize, - SnapshotId: rootDevice.SnapshotId, - VolumeType: rootDevice.VolumeType, - IOPS: rootDevice.IOPS, + createVolume := &ec2.CreateVolumeInput{ + AvailabilityZone: instance.Placement.AvailabilityZone, + Size: rootDevice.EBS.VolumeSize, + SnapshotID: rootDevice.EBS.SnapshotID, + VolumeType: rootDevice.EBS.VolumeType, + IOPS: rootDevice.EBS.IOPS, } log.Printf("Create args: %#v", createVolume) @@ -60,7 +61,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } // Set the volume ID so we remember to delete it later - s.volumeId = createVolumeResp.VolumeId + s.volumeId = *createVolumeResp.VolumeID log.Printf("Volume ID: %s", s.volumeId) // Wait for the volume to become ready @@ -69,13 +70,13 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "available", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.Volumes([]string{s.volumeId}, ec2.NewFilter()) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&s.volumeId}}) if err != nil { return nil, "", err } v := resp.Volumes[0] - return v, v.Status, nil + return v, *v.State, nil }, } @@ -100,7 +101,7 @@ func (s *StepCreateVolume) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Deleting the created EBS volume...") - _, err := ec2conn.DeleteVolume(s.volumeId) + _, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeID: &s.volumeId}) if err != nil { ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err)) } diff --git a/builder/amazon/chroot/step_instance_info.go b/builder/amazon/chroot/step_instance_info.go index cb694cc07..23191c54d 100644 --- a/builder/amazon/chroot/step_instance_info.go +++ b/builder/amazon/chroot/step_instance_info.go @@ -2,11 +2,12 @@ package chroot import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" + + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/builder/amazon/common" + "github.com/mitchellh/packer/packer" ) // StepInstanceInfo verifies that this builder is running on an EC2 instance. 
@@ -18,7 +19,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { // Get our own instance ID ui.Say("Gathering information about this EC2 instance...") - instanceIdBytes, err := aws.GetMetaData("instance-id") + instanceIdBytes, err := common.GetInstanceMetaData("instance-id") if err != nil { log.Printf("Error: %s", err) err := fmt.Errorf( @@ -33,7 +34,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { log.Printf("Instance ID: %s", instanceId) // Query the entire instance metadata - instancesResp, err := ec2conn.Instances([]string{instanceId}, ec2.NewFilter()) + instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIDs: []*string{&instanceId}}) if err != nil { err := fmt.Errorf("Error getting instance data: %s", err) state.Put("error", err) diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index 3c3d959c1..b8196f7fa 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -3,12 +3,13 @@ package chroot import ( "bytes" "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "os" "path/filepath" + + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" ) type mountPathData struct { @@ -59,9 +60,9 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - log.Printf("Source image virtualization type is: %s", image.VirtualizationType) + log.Printf("Source image virtualization type is: %s", *image.VirtualizationType) deviceMount := device - if image.VirtualizationType == "hvm" { + if *image.VirtualizationType == "hvm" { deviceMount = fmt.Sprintf("%s%d", device, 1) } state.Put("deviceMount", deviceMount) diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 62e6a3ff0..88da65e9f 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -3,7 +3,8 @@ package chroot import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" @@ -20,11 +21,11 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Registering the AMI...") - blockDevices := make([]ec2.BlockDeviceMapping, len(image.BlockDevices)) - for i, device := range image.BlockDevices { + blockDevices := make([]*ec2.BlockDeviceMapping, len(image.BlockDeviceMappings)) + for i, device := range image.BlockDeviceMappings { newDevice := device if newDevice.DeviceName == image.RootDeviceName { - newDevice.SnapshotId = snapshotId + newDevice.EBS.SnapshotID = &snapshotId } blockDevices[i] = newDevice @@ -34,7 +35,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set SriovNetSupport to "simple". 
See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { - registerOpts.SriovNetSupport = "simple" + registerOpts.SRIOVNetSupport = aws.String("simple") } registerResp, err := ec2conn.RegisterImage(registerOpts) @@ -45,16 +46,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Say(fmt.Sprintf("AMI: %s", registerResp.ImageId)) + ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Region.Name] = registerResp.ImageId + amis[ec2conn.Config.Region] = *registerResp.ImageID state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, registerResp.ImageId), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageID), StepState: state, } @@ -71,18 +72,18 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {} -func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []ec2.BlockDeviceMapping) *ec2.RegisterImage { - registerOpts := &ec2.RegisterImage{ - Name: config.AMIName, - Architecture: image.Architecture, - RootDeviceName: image.RootDeviceName, - BlockDevices: blockDevices, - VirtType: config.AMIVirtType, +func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []*ec2.BlockDeviceMapping) *ec2.RegisterImageInput { + registerOpts := &ec2.RegisterImageInput{ + Name: &config.AMIName, + Architecture: image.Architecture, + RootDeviceName: image.RootDeviceName, + BlockDeviceMappings: blockDevices, + VirtualizationType: &config.AMIVirtType, } if config.AMIVirtType != "hvm" { - registerOpts.KernelId = image.KernelId - registerOpts.RamdiskId = image.RamdiskId + registerOpts.KernelID = image.KernelID + registerOpts.RAMDiskID = image.RAMDiskID } return registerOpts diff --git a/builder/amazon/chroot/step_register_ami_test.go b/builder/amazon/chroot/step_register_ami_test.go index 393b95c8b..9d44ba684 100644 --- a/builder/amazon/chroot/step_register_ami_test.go +++ b/builder/amazon/chroot/step_register_ami_test.go @@ -1,16 +1,18 @@ package chroot import ( - "github.com/mitchellh/goamz/ec2" "testing" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" ) func testImage() ec2.Image { return ec2.Image{ - Id: "ami-abcd1234", - Name: "ami_test_name", - Architecture: "x86_64", - KernelId: "aki-abcd1234", + ImageID: aws.String("ami-abcd1234"), + Name: aws.String("ami_test_name"), + Architecture: aws.String("x86_64"), + KernelID: aws.String("aki-abcd1234"), } } @@ -22,23 +24,23 @@ func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) { image := testImage() - blockDevices := []ec2.BlockDeviceMapping{} + blockDevices := []*ec2.BlockDeviceMapping{} opts := buildRegisterOpts(&config, &image, blockDevices) expected := config.AMIVirtType - if opts.VirtType != expected { - t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType) + if *opts.VirtualizationType != expected { + t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType) } expected = config.AMIName - if opts.Name != expected { - t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name) + if *opts.Name != expected { + t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name) } - expected = image.KernelId - if opts.KernelId != expected 
{ - t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId) + expected = *image.KernelID + if *opts.KernelID != expected { + t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelID) } } @@ -51,23 +53,21 @@ func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) { image := testImage() - blockDevices := []ec2.BlockDeviceMapping{} + blockDevices := []*ec2.BlockDeviceMapping{} opts := buildRegisterOpts(&config, &image, blockDevices) expected := config.AMIVirtType - if opts.VirtType != expected { - t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType) + if *opts.VirtualizationType != expected { + t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType) } expected = config.AMIName - if opts.Name != expected { - t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name) + if *opts.Name != expected { + t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name) } - expected = "" - if opts.KernelId != expected { - t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId) + if opts.KernelID != nil { + t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelID) } - } diff --git a/builder/amazon/chroot/step_snapshot.go b/builder/amazon/chroot/step_snapshot.go index cad4b782b..e798a3a3e 100644 --- a/builder/amazon/chroot/step_snapshot.go +++ b/builder/amazon/chroot/step_snapshot.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" @@ -25,9 +25,12 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { volumeId := state.Get("volume_id").(string) ui.Say("Creating snapshot...") - createSnapResp, err := ec2conn.CreateSnapshot( - volumeId, - fmt.Sprintf("Packer: %s", time.Now().String())) + description := fmt.Sprintf("Packer: %s", time.Now().String()) + + createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{ + VolumeID: &volumeId, + Description: &description, + }) if err != nil { err := fmt.Errorf("Error creating snapshot: %s", err) state.Put("error", err) @@ -36,7 +39,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } // Set the snapshot ID so we can delete it later - s.snapshotId = createSnapResp.Id + s.snapshotId = *createSnapResp.SnapshotID ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId)) // Wait for the snapshot to be ready @@ -45,7 +48,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "completed", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.Snapshots([]string{s.snapshotId}, ec2.NewFilter()) + resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIDs: []*string{&s.snapshotId}}) if err != nil { return nil, "", err } @@ -55,7 +58,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } s := resp.Snapshots[0] - return s, s.Status, nil + return s, *s.State, nil }, } @@ -83,7 +86,7 @@ func (s *StepSnapshot) Cleanup(state multistep.StateBag) { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) ui.Say("Removing snapshot since we cancelled or halted...") - _, err := ec2conn.DeleteSnapshots([]string{s.snapshotId}) + _, err := 
ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotID: &s.snapshotId}) if err != nil { ui.Error(fmt.Sprintf("Error: %s", err)) } diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index a9a60d7cf..fedaabd24 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -2,10 +2,13 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/packer/packer" + "io/ioutil" + "net/http" "strings" "unicode" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/mitchellh/packer/packer" ) // AccessConfig is for common configuration related to AWS access @@ -16,37 +19,48 @@ type AccessConfig struct { Token string `mapstructure:"token"` } -// Auth returns a valid aws.Auth object for access to AWS services, or -// an error if the authentication couldn't be resolved. -func (c *AccessConfig) Auth() (aws.Auth, error) { - auth, err := aws.GetAuth(c.AccessKey, c.SecretKey) - if err == nil { - // Store the accesskey and secret that we got... - c.AccessKey = auth.AccessKey - c.SecretKey = auth.SecretKey - c.Token = auth.Token - } - if c.Token != "" { - auth.Token = c.Token +// Config returns a valid aws.Config object for access to AWS services, or +// an error if the authentication and region couldn't be resolved +func (c *AccessConfig) Config() (*aws.Config, error) { + credsProvider := aws.DetectCreds(c.AccessKey, c.SecretKey, c.Token) + + creds, err := credsProvider.Credentials() + if err != nil { + return nil, err } - return auth, err + c.AccessKey = creds.AccessKeyID + c.SecretKey = creds.SecretAccessKey + c.Token = creds.SessionToken + + region, err := c.Region() + if err != nil { + return nil, err + } + + return &aws.Config{ + Region: region, + Credentials: credsProvider, + }, nil } // Region returns the aws.Region object for access to AWS services, requesting // the region from the instance metadata if possible. 
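// Usage (a sketch, mirroring how the builders below consume this): Config()
// replaces the old Auth()/Region() pair as the single entry point, so
// credential detection and region validation happen once, up front:
//
//	config, err := b.config.Config()
//	if err != nil {
//		return nil, err
//	}
//	ec2conn := ec2.New(config)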
-func (c *AccessConfig) Region() (aws.Region, error) { +func (c *AccessConfig) Region() (string, error) { if c.RawRegion != "" { - return aws.Regions[c.RawRegion], nil + if valid := ValidateRegion(c.RawRegion); valid == false { + return "", fmt.Errorf("Not a valid region: %s", c.RawRegion) + } + return c.RawRegion, nil } - md, err := aws.GetMetaData("placement/availability-zone") + md, err := GetInstanceMetaData("placement/availability-zone") if err != nil { - return aws.Region{}, err + return "", err } region := strings.TrimRightFunc(string(md), unicode.IsLetter) - return aws.Regions[region], nil + return region, nil } func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error { @@ -75,7 +89,7 @@ func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error { } if c.RawRegion != "" { - if _, ok := aws.Regions[c.RawRegion]; !ok { + if valid := ValidateRegion(c.RawRegion); valid == false { errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion)) } } @@ -86,3 +100,24 @@ func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error { return nil } + +func GetInstanceMetaData(path string) (contents []byte, err error) { + url := "http://169.254.169.254/latest/meta-data/" + path + + resp, err := http.Get(url) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) + return + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + return []byte(body), err +} diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go index 91c2d12d0..4dbcd03c4 100644 --- a/builder/amazon/common/ami_config.go +++ b/builder/amazon/common/ami_config.go @@ -3,7 +3,6 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" "github.com/mitchellh/packer/packer" ) @@ -81,7 +80,7 @@ func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error { regionSet[region] = struct{}{} // Verify the region is real - if _, ok := aws.Regions[region]; !ok { + if valid := ValidateRegion(region); valid == false { errs = append(errs, fmt.Errorf("Unknown region: %s", region)) continue } diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 8d6265508..89f3d9fa2 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -6,8 +6,8 @@ import ( "sort" "strings" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/packer" ) @@ -67,8 +67,17 @@ func (a *Artifact) Destroy() error { for region, imageId := range a.Amis { log.Printf("Deregistering image ID (%s) from region (%s)", imageId, region) - regionconn := ec2.New(a.Conn.Auth, aws.Regions[region]) - if _, err := regionconn.DeregisterImage(imageId); err != nil { + + regionConfig := &aws.Config{ + Credentials: a.Conn.Config.Credentials, + Region: region, + } + regionConn := ec2.New(regionConfig) + + input := &ec2.DeregisterImageInput{ + ImageID: &imageId, + } + if _, err := regionConn.DeregisterImage(input); err != nil { errors = append(errors, err) } diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 9557cc579..de155b56a 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -3,7 +3,8 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" 
"github.com/mitchellh/packer/packer" ) @@ -25,21 +26,29 @@ type BlockDevices struct { LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings"` } -func buildBlockDevices(b []BlockDevice) []ec2.BlockDeviceMapping { - var blockDevices []ec2.BlockDeviceMapping +func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { + var blockDevices []*ec2.BlockDeviceMapping for _, blockDevice := range b { - blockDevices = append(blockDevices, ec2.BlockDeviceMapping{ - DeviceName: blockDevice.DeviceName, - VirtualName: blockDevice.VirtualName, - SnapshotId: blockDevice.SnapshotId, - VolumeType: blockDevice.VolumeType, - VolumeSize: blockDevice.VolumeSize, - DeleteOnTermination: blockDevice.DeleteOnTermination, - IOPS: blockDevice.IOPS, - NoDevice: blockDevice.NoDevice, - Encrypted: blockDevice.Encrypted, - }) + ebsBlockDevice := &ec2.EBSBlockDevice{ + SnapshotID: &blockDevice.SnapshotId, + Encrypted: &blockDevice.Encrypted, + IOPS: &blockDevice.IOPS, + VolumeType: &blockDevice.VolumeType, + VolumeSize: &blockDevice.VolumeSize, + DeleteOnTermination: &blockDevice.DeleteOnTermination, + } + mapping := &ec2.BlockDeviceMapping{ + EBS: ebsBlockDevice, + DeviceName: &blockDevice.DeviceName, + VirtualName: &blockDevice.VirtualName, + } + + if blockDevice.NoDevice { + mapping.NoDevice = aws.String("") + } + + blockDevices = append(blockDevices, mapping) } return blockDevices } @@ -89,10 +98,10 @@ func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error { return nil } -func (b *BlockDevices) BuildAMIDevices() []ec2.BlockDeviceMapping { +func (b *BlockDevices) BuildAMIDevices() []*ec2.BlockDeviceMapping { return buildBlockDevices(b.AMIMappings) } -func (b *BlockDevices) BuildLaunchDevices() []ec2.BlockDeviceMapping { +func (b *BlockDevices) BuildLaunchDevices() []*ec2.BlockDeviceMapping { return buildBlockDevices(b.LaunchMappings) } diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 838b23aec..a4c1dbb79 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -1,9 +1,11 @@ package common import ( - "github.com/mitchellh/goamz/ec2" "reflect" "testing" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" ) func TestBlockDevice(t *testing.T) { @@ -23,13 +25,16 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: "/dev/sdb", - VirtualName: "ephemeral0", - SnapshotId: "snap-1234", - VolumeType: "standard", - VolumeSize: 8, - DeleteOnTermination: true, - IOPS: 1000, + DeviceName: aws.String("/dev/sdb"), + VirtualName: aws.String("ephemeral0"), + EBS: &ec2.EBSBlockDevice{ + Encrypted: aws.Boolean(false), + SnapshotID: aws.String("snap-1234"), + VolumeType: aws.String("standard"), + VolumeSize: aws.Long(8), + DeleteOnTermination: aws.Boolean(true), + IOPS: aws.Long(1000), + }, }, }, } @@ -40,9 +45,9 @@ func TestBlockDevice(t *testing.T) { LaunchMappings: []BlockDevice{*tc.Config}, } - expected := []ec2.BlockDeviceMapping{*tc.Result} - - if !reflect.DeepEqual(expected, blockDevices.BuildAMIDevices()) { + expected := []*ec2.BlockDeviceMapping{tc.Result} + got := blockDevices.BuildAMIDevices() + if !reflect.DeepEqual(expected, got) { t.Fatalf("bad: %#v", expected) } diff --git a/builder/amazon/common/regions.go b/builder/amazon/common/regions.go new file mode 100644 index 000000000..4d3762465 --- /dev/null +++ b/builder/amazon/common/regions.go @@ -0,0 +1,16 @@ +package common + +// IsValidRegion returns true if the 
supplied region is a valid AWS +// region and false if it's not. +func ValidateRegion(region string) bool { + var regions = [11]string{"us-east-1", "us-west-2", "us-west-1", "eu-west-1", + "eu-central-1", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", + "sa-east-1", "cn-north-1", "us-gov-west-1"} + + for _, valid := range regions { + if region == valid { + return true + } + } + return false +} diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index f31437d89..965a26994 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -1,12 +1,13 @@ package common import ( - "code.google.com/p/go.crypto/ssh" "errors" "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" "time" + + "code.google.com/p/go.crypto/ssh" + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" ) // SSHAddress returns a function that can be given to the SSH communicator @@ -16,27 +17,29 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (st for j := 0; j < 2; j++ { var host string i := state.Get("instance").(*ec2.Instance) - if i.VpcId != "" { - if i.PublicIpAddress != "" && !private { - host = i.PublicIpAddress + if *i.VPCID != "" { + if *i.PublicIPAddress != "" && !private { + host = *i.PublicIPAddress } else { - host = i.PrivateIpAddress + host = *i.PrivateIPAddress } - } else if i.DNSName != "" { - host = i.DNSName + } else if *i.PublicDNSName != "" { + host = *i.PublicDNSName } if host != "" { return fmt.Sprintf("%s:%d", host, port), nil } - r, err := e.Instances([]string{i.InstanceId}, ec2.NewFilter()) + r, err := e.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIDs: []*string{i.InstanceID}, + }) if err != nil { return "", err } if len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 { - return "", fmt.Errorf("instance not found: %s", i.InstanceId) + return "", fmt.Errorf("instance not found: %s", *i.InstanceID) } state.Put("instance", &r.Reservations[0].Instances[0]) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 62e861d74..00a58be08 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -3,13 +3,15 @@ package common import ( "errors" "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" "log" "net" "os" "strconv" "time" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" ) // StateRefreshFunc is a function type used for StateChangeConf that is @@ -36,9 +38,11 @@ type StateChangeConf struct { // an AMI for state changes. func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { return func() (interface{}, string, error) { - resp, err := conn.Images([]string{imageId}, ec2.NewFilter()) + resp, err := conn.DescribeImages(&ec2.DescribeImagesInput{ + ImageIDs: []*string{&imageId}, + }) if err != nil { - if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidAMIID.NotFound" { + if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidAMIID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { @@ -57,7 +61,7 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { } i := resp.Images[0] - return i, i.State, nil + return i, *i.State, nil } } @@ -65,9 +69,11 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { // an EC2 instance. 
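// Example (a sketch of the established pattern, not new code in this patch):
// these StateRefreshFuncs are driven through StateChangeConf/WaitForState,
// polling until the resource reaches the target state:
//
//	stateChange := awscommon.StateChangeConf{
//		Pending:   []string{"pending"},
//		Target:    "available",
//		Refresh:   awscommon.AMIStateRefreshFunc(ec2conn, *resp.ImageID),
//		StepState: state,
//	}
//	if _, err := awscommon.WaitForState(&stateChange); err != nil {
//		// the AMI never became available; surface the error
//	}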
func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc { return func() (interface{}, string, error) { - resp, err := conn.Instances([]string{i.InstanceId}, ec2.NewFilter()) + resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIDs: []*string{i.InstanceID}, + }) if err != nil { - if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidInstanceID.NotFound" { + if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { @@ -85,8 +91,8 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc { return nil, "", nil } - i = &resp.Reservations[0].Instances[0] - return i, i.State.Name, nil + i = resp.Reservations[0].Instances[0] + return i, *i.State.Name, nil } } @@ -94,9 +100,12 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc { // a spot request for state changes. func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefreshFunc { return func() (interface{}, string, error) { - resp, err := conn.DescribeSpotRequests([]string{spotRequestId}, ec2.NewFilter()) + resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIDs: []*string{&spotRequestId}, + }) + if err != nil { - if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidSpotInstanceRequestID.NotFound" { + if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidSpotInstanceRequestID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { @@ -108,14 +117,14 @@ func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefre } } - if resp == nil || len(resp.SpotRequestResults) == 0 { + if resp == nil || len(resp.SpotInstanceRequests) == 0 { // Sometimes AWS has consistency issues and doesn't see the // SpotRequest. Return an empty state. 
return nil, "", nil } - i := resp.SpotRequestResults[0] - return i, i.State, nil + i := resp.SpotInstanceRequests[0] + return i, *i.State, nil } } diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 898eacc08..88ed1884f 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -2,11 +2,14 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" + + "sync" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "sync" ) type StepAMIRegionCopy struct { @@ -17,7 +20,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) amis := state.Get("amis").(map[string]string) - ami := amis[ec2conn.Region.Name] + ami := amis[ec2conn.Config.Region] if len(s.Regions) == 0 { return multistep.ActionContinue @@ -34,8 +37,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { go func(region string) { defer wg.Done() - id, err := amiRegionCopy(state, ec2conn.Auth, ami, - aws.Regions[region], ec2conn.Region) + id, err := amiRegionCopy(state, ec2conn.Config.Credentials, ami, region, ec2conn.Config.Region) lock.Lock() defer lock.Unlock() @@ -67,32 +69,36 @@ func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) { // amiRegionCopy does a copy for the given AMI to the target region and // returns the resulting ID or error. -func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string, - target aws.Region, source aws.Region) (string, error) { +func amiRegionCopy(state multistep.StateBag, auth aws.CredentialsProvider, imageId string, + target string, source string) (string, error) { // Connect to the region where the AMI will be copied to - regionconn := ec2.New(auth, target) - resp, err := regionconn.CopyImage(&ec2.CopyImage{ - SourceRegion: source.Name, - SourceImageId: imageId, + config := &aws.Config{ + Credentials: auth, + Region: target, + } + regionconn := ec2.New(config) + resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ + SourceRegion: &source, + SourceImageID: &imageId, }) if err != nil { return "", fmt.Errorf("Error Copying AMI (%s) to region (%s): %s", - imageId, target.Name, err) + imageId, target, err) } stateChange := StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: AMIStateRefreshFunc(regionconn, resp.ImageId), + Refresh: AMIStateRefreshFunc(regionconn, *resp.ImageID), StepState: state, } if _, err := WaitForState(&stateChange); err != nil { return "", fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s", - resp.ImageId, target.Name, err) + *resp.ImageID, target, err) } - return resp.ImageId, nil + return *resp.ImageID, nil } diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index a204ca321..fc17458ee 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -21,14 +22,20 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { for region, ami := range amis { 
ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami)) - var ec2Tags []ec2.Tag + var ec2Tags []*ec2.Tag for key, value := range s.Tags { ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", key, value)) - ec2Tags = append(ec2Tags, ec2.Tag{key, value}) + ec2Tags = append(ec2Tags, &ec2.Tag{Key: &key, Value: &value}) } - regionconn := ec2.New(ec2conn.Auth, aws.Regions[region]) - _, err := regionconn.CreateTags([]string{ami}, ec2Tags) + regionconn := ec2.New(&aws.Config{ + Credentials: ec2conn.Config.Credentials, + Region: region, + }) + _, err := regionconn.CreateTags(&ec2.CreateTagsInput{ + Resources: []*string{&ami}, + Tags: ec2Tags, + }) if err != nil { err := fmt.Errorf("Error adding tags to AMI (%s): %s", ami, err) state.Put("error", err) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 3a7eb9f35..5082d7b26 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -2,12 +2,13 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "runtime" + + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" ) type StepKeyPair struct { @@ -39,7 +40,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName)) - keyResp, err := ec2conn.CreateKeyPair(s.KeyPairName) + keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: &s.KeyPairName}) if err != nil { state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) return multistep.ActionHalt @@ -50,7 +51,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { // Set some state data for use in future steps state.Put("keyPair", s.keyName) - state.Put("privateKey", keyResp.KeyMaterial) + state.Put("privateKey", *keyResp.KeyMaterial) // If we're in debug mode, output the private key to the working // directory. @@ -64,7 +65,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { defer f.Close() // Write the key out - if _, err := f.Write([]byte(keyResp.KeyMaterial)); err != nil { + if _, err := f.Write([]byte(*keyResp.KeyMaterial)); err != nil { state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) return multistep.ActionHalt } @@ -91,7 +92,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Deleting temporary keypair...") - _, err := ec2conn.DeleteKeyPair(s.keyName) + _, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.keyName}) if err != nil { ui.Error(fmt.Sprintf( "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index 533d4cfd9..0628109b5 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -34,37 +35,53 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc // Construct the modify image attribute requests we're going to make. // We need to make each separately since the EC2 API only allows changing // one type at a kind currently. - options := make(map[string]*ec2.ModifyImageAttribute) + options := make(map[string]*ec2.ModifyImageAttributeInput) if s.Description != "" { - options["description"] = &ec2.ModifyImageAttribute{ - Description: s.Description, + options["description"] = &ec2.ModifyImageAttributeInput{ + Description: &ec2.AttributeValue{Value: &s.Description}, } } if len(s.Groups) > 0 { - options["groups"] = &ec2.ModifyImageAttribute{ - AddGroups: s.Groups, + groups := make([]*string, len(s.Groups)) + for i, g := range s.Groups { + groups[i] = &g + } + options["groups"] = &ec2.ModifyImageAttributeInput{ + UserGroups: groups, } } if len(s.Users) > 0 { - options["users"] = &ec2.ModifyImageAttribute{ - AddUsers: s.Users, + users := make([]*string, len(s.Users)) + for i, u := range s.Users { + users[i] = &u + } + options["users"] = &ec2.ModifyImageAttributeInput{ + UserIDs: users, } } if len(s.ProductCodes) > 0 { - options["product codes"] = &ec2.ModifyImageAttribute{ - ProductCodes: s.ProductCodes, + codes := make([]*string, len(s.ProductCodes)) + for i, c := range s.ProductCodes { + codes[i] = &c + } + options["product codes"] = &ec2.ModifyImageAttributeInput{ + ProductCodes: codes, } } for region, ami := range amis { ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami)) - regionconn := ec2.New(ec2conn.Auth, aws.Regions[region]) - for name, opts := range options { + regionconn := ec2.New(&aws.Config{ + Credentials: ec2conn.Config.Credentials, + Region: region, + }) + for name, input := range options { ui.Message(fmt.Sprintf("Modifying: %s", name)) - _, err := regionconn.ModifyImageAttribute(ami, opts) + input.ImageID = &ami + _, err := regionconn.ModifyImageAttribute(input) if err != nil { err := fmt.Errorf("Error modify AMI attributes: %s", err) state.Put("error", err) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 50cedf6ea..644734619 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -7,7 +7,9 @@ import ( "strconv" "time" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -29,15 +31,20 @@ type StepRunSourceInstance struct { UserDataFile string instance *ec2.Instance - spotRequest *ec2.SpotRequestResult + spotRequest *ec2.SpotInstanceRequest } func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) keyName := state.Get("keyPair").(string) - securityGroupIds := 
state.Get("securityGroupIds").([]string) + tempSecurityGroupIds := state.Get("securityGroupIds").([]string) ui := state.Get("ui").(packer.Ui) + securityGroupIds := make([]*string, len(tempSecurityGroupIds)) + for i, sg := range tempSecurityGroupIds { + securityGroupIds[i] = &sg + } + userData := s.UserData if s.UserDataFile != "" { contents, err := ioutil.ReadFile(s.UserDataFile) @@ -49,13 +56,10 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi userData = string(contents) } - securityGroups := make([]ec2.SecurityGroup, len(securityGroupIds)) - for n, securityGroupId := range securityGroupIds { - securityGroups[n] = ec2.SecurityGroup{Id: securityGroupId} - } - ui.Say("Launching a source AWS instance...") - imageResp, err := ec2conn.Images([]string{s.SourceAMI}, ec2.NewFilter()) + imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + ImageIDs: []*string{&s.SourceAMI}, + }) if err != nil { state.Put("error", fmt.Errorf("There was a problem with the source AMI: %s", err)) return multistep.ActionHalt @@ -66,11 +70,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi return multistep.ActionHalt } - if s.ExpectedRootDevice != "" && imageResp.Images[0].RootDeviceType != s.ExpectedRootDevice { + if s.ExpectedRootDevice != "" && *imageResp.Images[0].RootDeviceType != s.ExpectedRootDevice { state.Put("error", fmt.Errorf( "The provided source AMI has an invalid root device type.\n"+ "Expected '%s', got '%s'.", - s.ExpectedRootDevice, imageResp.Images[0].RootDeviceType)) + s.ExpectedRootDevice, *imageResp.Images[0].RootDeviceType)) return multistep.ActionHalt } @@ -82,11 +86,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi // Detect the spot price startTime := time.Now().Add(-1 * time.Hour) - resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistory{ - InstanceType: []string{s.InstanceType}, - ProductDescription: []string{s.SpotPriceProduct}, - AvailabilityZone: s.AvailabilityZone, - StartTime: startTime, + resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{ + InstanceTypes: []*string{&s.InstanceType}, + ProductDescriptions: []*string{&s.SpotPriceProduct}, + AvailabilityZone: &s.AvailabilityZone, + StartTime: &startTime, }) if err != nil { err := fmt.Errorf("Error finding spot price: %s", err) @@ -96,9 +100,9 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } var price float64 - for _, history := range resp.History { - log.Printf("[INFO] Candidate spot price: %s", history.SpotPrice) - current, err := strconv.ParseFloat(history.SpotPrice, 64) + for _, history := range resp.SpotPriceHistory { + log.Printf("[INFO] Candidate spot price: %s", *history.SpotPrice) + current, err := strconv.ParseFloat(*history.SpotPrice, 64) if err != nil { log.Printf("[ERR] Error parsing spot price: %s", err) continue @@ -120,20 +124,33 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi var instanceId string if spotPrice == "" { - runOpts := &ec2.RunInstances{ - KeyName: keyName, - ImageId: s.SourceAMI, - InstanceType: s.InstanceType, - UserData: []byte(userData), - MinCount: 0, - MaxCount: 0, - SecurityGroups: securityGroups, - IamInstanceProfile: s.IamInstanceProfile, - SubnetId: s.SubnetId, - AssociatePublicIpAddress: s.AssociatePublicIpAddress, - BlockDevices: s.BlockDevices.BuildLaunchDevices(), - AvailZone: s.AvailabilityZone, + runOpts := &ec2.RunInstancesInput{ + KeyName: &keyName, + 
ImageID: &s.SourceAMI, + InstanceType: &s.InstanceType, + UserData: &userData, + MaxCount: aws.Long(1), + MinCount: aws.Long(1), + IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, + BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), + Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone}, } + + if s.SubnetId != "" && s.AssociatePublicIpAddress { + runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ + &ec2.InstanceNetworkInterfaceSpecification{ + DeviceIndex: aws.Long(0), + AssociatePublicIPAddress: &s.AssociatePublicIpAddress, + SubnetID: &s.SubnetId, + Groups: securityGroupIds, + DeleteOnTermination: aws.Boolean(true), + }, + } + } else { + runOpts.SubnetID = &s.SubnetId + runOpts.SecurityGroupIDs = securityGroupIds + } + runResp, err := ec2conn.RunInstances(runOpts) if err != nil { err := fmt.Errorf("Error launching source instance: %s", err) @@ -141,26 +158,29 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ui.Error(err.Error()) return multistep.ActionHalt } - instanceId = runResp.Instances[0].InstanceId + instanceId = *runResp.Instances[0].InstanceID } else { ui.Message(fmt.Sprintf( "Requesting spot instance '%s' for: %s", s.InstanceType, spotPrice)) - runOpts := &ec2.RequestSpotInstances{ - SpotPrice: spotPrice, - KeyName: keyName, - ImageId: s.SourceAMI, - InstanceType: s.InstanceType, - UserData: []byte(userData), - SecurityGroups: securityGroups, - IamInstanceProfile: s.IamInstanceProfile, - SubnetId: s.SubnetId, - AssociatePublicIpAddress: s.AssociatePublicIpAddress, - BlockDevices: s.BlockDevices.BuildLaunchDevices(), - AvailZone: s.AvailabilityZone, - } - runSpotResp, err := ec2conn.RequestSpotInstances(runOpts) + runSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{ + SpotPrice: &spotPrice, + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + KeyName: &keyName, + ImageID: &s.SourceAMI, + InstanceType: &s.InstanceType, + UserData: &userData, + SecurityGroupIDs: securityGroupIds, + IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, + SubnetID: &s.SubnetId, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + &ec2.InstanceNetworkInterfaceSpecification{AssociatePublicIPAddress: &s.AssociatePublicIpAddress}, + }, + Placement: &ec2.SpotPlacement{AvailabilityZone: &s.AvailabilityZone}, + BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), + }, + }) if err != nil { err := fmt.Errorf("Error launching source spot instance: %s", err) state.Put("error", err) @@ -168,44 +188,47 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi return multistep.ActionHalt } - s.spotRequest = &runSpotResp.SpotRequestResults[0] + s.spotRequest = runSpotResp.SpotInstanceRequests[0] - spotRequestId := s.spotRequest.SpotRequestId - ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", spotRequestId)) + spotRequestId := s.spotRequest.SpotInstanceRequestID + ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", *spotRequestId)) stateChange := StateChangeConf{ Pending: []string{"open"}, Target: "active", - Refresh: SpotRequestStateRefreshFunc(ec2conn, spotRequestId), + Refresh: SpotRequestStateRefreshFunc(ec2conn, *spotRequestId), StepState: state, } _, err = WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", spotRequestId, err) + err := fmt.Errorf("Error waiting 
for spot request (%s) to become ready: %s", *spotRequestId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - spotResp, err := ec2conn.DescribeSpotRequests([]string{spotRequestId}, nil) + + spotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIDs: []*string{spotRequestId}, + }) if err != nil { - err := fmt.Errorf("Error finding spot request (%s): %s", spotRequestId, err) + err := fmt.Errorf("Error finding spot request (%s): %s", *spotRequestId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - instanceId = spotResp.SpotRequestResults[0].InstanceId + instanceId = *spotResp.SpotInstanceRequests[0].InstanceID } - instanceResp, err := ec2conn.Instances([]string{instanceId}, nil) + instanceResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIDs: []*string{&instanceId}}) if err != nil { err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - s.instance = &instanceResp.Reservations[0].Instances[0] - ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId)) + s.instance = instanceResp.Reservations[0].Instances[0] + ui.Message(fmt.Sprintf("Instance ID: %s", *s.instance.InstanceID)) - ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId)) + ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", *s.instance.InstanceID)) stateChange := StateChangeConf{ Pending: []string{"pending"}, Target: "running", @@ -214,7 +237,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } latestInstance, err := WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", s.instance.InstanceId, err) + err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", *s.instance.InstanceID, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt @@ -222,29 +245,32 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi s.instance = latestInstance.(*ec2.Instance) - ec2Tags := make([]ec2.Tag, 1, len(s.Tags)+1) - ec2Tags[0] = ec2.Tag{"Name", "Packer Builder"} + ec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1) + ec2Tags[0] = &ec2.Tag{Key: aws.String("Name"), Value: aws.String("Packer Builder")} for k, v := range s.Tags { - ec2Tags = append(ec2Tags, ec2.Tag{k, v}) + ec2Tags = append(ec2Tags, &ec2.Tag{Key: &k, Value: &v}) } - _, err = ec2conn.CreateTags([]string{s.instance.InstanceId}, ec2Tags) + _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ + Tags: ec2Tags, + Resources: []*string{s.instance.InstanceID}, + }) if err != nil { ui.Message( fmt.Sprintf("Failed to tag a Name on the builder instance: %s", err)) } if s.Debug { - if s.instance.DNSName != "" { - ui.Message(fmt.Sprintf("Public DNS: %s", s.instance.DNSName)) + if *s.instance.PublicDNSName != "" { + ui.Message(fmt.Sprintf("Public DNS: %s", *s.instance.PublicDNSName)) } - if s.instance.PublicIpAddress != "" { - ui.Message(fmt.Sprintf("Public IP: %s", s.instance.PublicIpAddress)) + if *s.instance.PublicIPAddress != "" { + ui.Message(fmt.Sprintf("Public IP: %s", *s.instance.PublicIPAddress)) } - if s.instance.PrivateIpAddress != "" { - ui.Message(fmt.Sprintf("Private IP: %s", s.instance.PrivateIpAddress)) + if *s.instance.PrivateIPAddress != "" { + ui.Message(fmt.Sprintf("Private IP: %s", *s.instance.PrivateIPAddress)) } } @@ 
-261,13 +287,16 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { // Cancel the spot request if it exists if s.spotRequest != nil { ui.Say("Cancelling the spot request...") - if _, err := ec2conn.CancelSpotRequests([]string{s.spotRequest.SpotRequestId}); err != nil { + input := &ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIDs: []*string{s.spotRequest.InstanceID}, + } + if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil { ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err)) return } stateChange := StateChangeConf{ Pending: []string{"active", "open"}, - Refresh: SpotRequestStateRefreshFunc(ec2conn, s.spotRequest.SpotRequestId), + Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestID), Target: "cancelled", } @@ -279,7 +308,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { if s.instance != nil { ui.Say("Terminating the source AWS instance...") - if _, err := ec2conn.TerminateInstances([]string{s.instance.InstanceId}); err != nil { + if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{s.instance.InstanceID}}); err != nil { ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err)) return } diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go index 1d95619fe..356c4a752 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -2,12 +2,14 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "log" + "time" + + "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/packer" - "log" - "time" ) type StepSecurityGroup struct { @@ -36,10 +38,10 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Creating temporary security group for this instance...") groupName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID()) log.Printf("Temporary group name: %s", groupName) - group := ec2.SecurityGroup{ - Name: groupName, - Description: "Temporary group for Packer", - VpcId: s.VpcId, + group := &ec2.CreateSecurityGroupInput{ + GroupName: &groupName, + Description: aws.String("Temporary group for Packer"), + VPCID: &s.VpcId, } groupResp, err := ec2conn.CreateSecurityGroup(group) if err != nil { @@ -48,16 +50,15 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { } // Set the group ID so we can delete it later - s.createdGroupId = groupResp.Id + s.createdGroupId = *groupResp.GroupID - // Authorize the SSH access - perms := []ec2.IPPerm{ - ec2.IPPerm{ - Protocol: "tcp", - FromPort: s.SSHPort, - ToPort: s.SSHPort, - SourceIPs: []string{"0.0.0.0/0"}, - }, + // Authorize the SSH access for the security group + req := &ec2.AuthorizeSecurityGroupIngressInput{ + GroupID: groupResp.GroupID, + IPProtocol: aws.String("tcp"), + FromPort: aws.Long(int64(s.SSHPort)), + ToPort: aws.Long(int64(s.SSHPort)), + CIDRIP: aws.String("0.0.0.0/0"), } // We loop and retry this a few times because sometimes the security @@ -65,7 +66,7 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { // consistent. 
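// A minimal sketch of that retry, assuming a retryable call `do` (the
// authorize loop below and the delete loop in Cleanup both follow it):
//
//	var err error
//	for i := 0; i < 5; i++ {
//		if err = do(); err == nil {
//			break
//		}
//		time.Sleep(time.Duration(i) * time.Second) // hypothetical backoff
//	}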
ui.Say("Authorizing SSH access on the temporary security group...") for i := 0; i < 5; i++ { - _, err = ec2conn.AuthorizeSecurityGroup(groupResp.SecurityGroup, perms) + _, err = ec2conn.AuthorizeSecurityGroupIngress(req) if err == nil { break } @@ -99,7 +100,7 @@ func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) { var err error for i := 0; i < 5; i++ { - _, err = ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: s.createdGroupId}) + _, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupID: &s.createdGroupId}) if err == nil { break } diff --git a/builder/amazon/common/step_source_ami_info.go b/builder/amazon/common/step_source_ami_info.go index c9f72123d..b0c941fda 100644 --- a/builder/amazon/common/step_source_ami_info.go +++ b/builder/amazon/common/step_source_ami_info.go @@ -3,7 +3,7 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -23,7 +23,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Inspecting the source AMI...") - imageResp, err := ec2conn.Images([]string{s.SourceAmi}, ec2.NewFilter()) + imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIDs: []*string{&s.SourceAmi}}) if err != nil { err := fmt.Errorf("Error querying AMI: %s", err) state.Put("error", err) @@ -38,11 +38,11 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - image := &imageResp.Images[0] + image := imageResp.Images[0] // Enhanced Networking (SriovNetSupport) can only be enabled on HVM AMIs. // See http://goo.gl/icuXh5 - if s.EnhancedNetworking && image.VirtualizationType != "hvm" { + if s.EnhancedNetworking && *image.VirtualizationType != "hvm" { err := fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi) state.Put("error", err) ui.Error(err.Error()) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 889cc7b60..619ad0ff6 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -9,7 +9,7 @@ import ( "fmt" "log" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" @@ -63,17 +63,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - region, err := b.config.Region() + config, err := b.config.Config() if err != nil { return nil, err } - auth, err := b.config.AccessConfig.Auth() - if err != nil { - return nil, err - } - - ec2conn := ec2.New(auth, region) + ec2conn := ec2.New(config) // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) diff --git a/builder/amazon/ebs/builder_test.go b/builder/amazon/ebs/builder_test.go index 6f777afa9..664c751fa 100644 --- a/builder/amazon/ebs/builder_test.go +++ b/builder/amazon/ebs/builder_test.go @@ -1,8 +1,9 @@ package ebs import ( - "github.com/mitchellh/packer/packer" "testing" + + "github.com/mitchellh/packer/packer" ) func testConfig() map[string]interface{} { diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go index f380ea0b1..8b86f1d25 100644 --- a/builder/amazon/ebs/step_create_ami.go +++ 
b/builder/amazon/ebs/step_create_ami.go @@ -2,7 +2,8 @@ package ebs import ( "fmt" - "github.com/mitchellh/goamz/ec2" + + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" @@ -20,10 +21,10 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { // Create the image ui.Say(fmt.Sprintf("Creating the AMI: %s", config.AMIName)) - createOpts := &ec2.CreateImage{ - InstanceId: instance.InstanceId, - Name: config.AMIName, - BlockDevices: config.BlockDevices.BuildAMIDevices(), + createOpts := &ec2.CreateImageInput{ + InstanceID: instance.InstanceID, + Name: &config.AMIName, + BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(), } createResp, err := ec2conn.CreateImage(createOpts) @@ -35,16 +36,16 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Message(fmt.Sprintf("AMI: %s", createResp.ImageId)) + ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Region.Name] = createResp.ImageId + amis[ec2conn.Config.Region] = *createResp.ImageID state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, createResp.ImageId), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *createResp.ImageID), StepState: state, } @@ -56,14 +57,14 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - imagesResp, err := ec2conn.Images([]string{createResp.ImageId}, nil) + imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIDs: []*string{createResp.ImageID}}) if err != nil { err := fmt.Errorf("Error searching for AMI: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - s.image = &imagesResp.Images[0] + s.image = imagesResp.Images[0] return multistep.ActionContinue } @@ -83,11 +84,9 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Deregistering the AMI because cancelation or error...") - if resp, err := ec2conn.DeregisterImage(s.image.Id); err != nil { + deregisterOpts := &ec2.DeregisterImageInput{ImageID: s.image.ImageID} + if _, err := ec2conn.DeregisterImage(deregisterOpts); err != nil { ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err)) return - } else if resp.Return == false { - ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return)) - return } } diff --git a/builder/amazon/ebs/step_modify_instance.go b/builder/amazon/ebs/step_modify_instance.go index 21c5e7de9..9fe0b40ce 100644 --- a/builder/amazon/ebs/step_modify_instance.go +++ b/builder/amazon/ebs/step_modify_instance.go @@ -3,7 +3,7 @@ package ebs import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -19,12 +19,13 @@ func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction // Set SriovNetSupport to "simple". 
See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { ui.Say("Enabling Enhanced Networking...") - _, err := ec2conn.ModifyInstance( - instance.InstanceId, - &ec2.ModifyInstance{SriovNetSupport: true}, - ) + simple := "simple" + _, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ + InstanceID: instance.InstanceID, + SRIOVNetSupport: &ec2.AttributeValue{Value: &simple}, + }) if err != nil { - err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", instance.InstanceId, err) + err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", *instance.InstanceID, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go index 09c19bddb..c01de8fdc 100644 --- a/builder/amazon/ebs/step_stop_instance.go +++ b/builder/amazon/ebs/step_stop_instance.go @@ -2,7 +2,8 @@ package ebs import ( "fmt" - "github.com/mitchellh/goamz/ec2" + + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" @@ -24,7 +25,9 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { // Stop the instance so we can create an AMI from it ui.Say("Stopping the source instance...") - _, err := ec2conn.StopInstances(instance.InstanceId) + _, err := ec2conn.StopInstances(&ec2.StopInstancesInput{ + InstanceIDs: []*string{instance.InstanceID}, + }) if err != nil { err := fmt.Errorf("Error stopping instance: %s", err) state.Put("error", err) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 63b8442ac..69806c3db 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" @@ -168,17 +168,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - region, err := b.config.Region() + config, err := b.config.Config() if err != nil { return nil, err } - auth, err := b.config.AccessConfig.Auth() - if err != nil { - return nil, err - } - - ec2conn := ec2.New(auth, region) + ec2conn := ec2.New(config) // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) diff --git a/builder/amazon/instance/step_bundle_volume.go b/builder/amazon/instance/step_bundle_volume.go index 736e1adb2..362f949f0 100644 --- a/builder/amazon/instance/step_bundle_volume.go +++ b/builder/amazon/instance/step_bundle_volume.go @@ -3,7 +3,7 @@ package instance import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -34,7 +34,7 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { var err error config.BundleVolCommand, err = config.tpl.Process(config.BundleVolCommand, bundleCmdData{ AccountId: config.AccountId, - Architecture: instance.Architecture, + Architecture: *instance.Architecture, CertPath: x509RemoteCertPath, Destination: config.BundleDestination, KeyPath: x509RemoteKeyPath, diff --git a/builder/amazon/instance/step_register_ami.go 
b/builder/amazon/instance/step_register_ami.go index 07040f417..349c7f856 100644 --- a/builder/amazon/instance/step_register_ami.go +++ b/builder/amazon/instance/step_register_ami.go @@ -3,7 +3,7 @@ package instance import ( "fmt" - "github.com/mitchellh/goamz/ec2" + "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" @@ -18,16 +18,17 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Registering the AMI...") - registerOpts := &ec2.RegisterImage{ - ImageLocation: manifestPath, - Name: config.AMIName, - BlockDevices: config.BlockDevices.BuildAMIDevices(), - VirtType: config.AMIVirtType, + registerOpts := &ec2.RegisterImageInput{ + ImageLocation: &manifestPath, + Name: &config.AMIName, + BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(), + VirtualizationType: &config.AMIVirtType, } // Set SriovNetSupport to "simple". See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { - registerOpts.SriovNetSupport = "simple" + simple := "simple" + registerOpts.SRIOVNetSupport = &simple } registerResp, err := ec2conn.RegisterImage(registerOpts) @@ -38,16 +39,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Say(fmt.Sprintf("AMI: %s", registerResp.ImageId)) + ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Region.Name] = registerResp.ImageId + amis[ec2conn.Config.Region] = *registerResp.ImageID state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, registerResp.ImageId), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageID), StepState: state, } diff --git a/builder/amazon/instance/step_upload_bundle.go b/builder/amazon/instance/step_upload_bundle.go index dbf6a0c29..4b146498c 100644 --- a/builder/amazon/instance/step_upload_bundle.go +++ b/builder/amazon/instance/step_upload_bundle.go @@ -40,7 +40,7 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction { BucketName: config.S3Bucket, BundleDirectory: config.BundleDestination, ManifestPath: manifestPath, - Region: region.Name, + Region: region, SecretKey: config.SecretKey, }) if err != nil { From 7bd37b1f7a4e765dd40d89f6947c0e88a8c2bd69 Mon Sep 17 00:00:00 2001 From: Travis Truman Date: Tue, 14 Apr 2015 11:14:58 -0400 Subject: [PATCH 048/956] Tests were failing incorrectly when OpenStack environment variables were set in the environment running the tests --- builder/openstack/access_config_test.go | 40 +++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/builder/openstack/access_config_test.go b/builder/openstack/access_config_test.go index 5c92216e3..cf37448cc 100644 --- a/builder/openstack/access_config_test.go +++ b/builder/openstack/access_config_test.go @@ -1,9 +1,17 @@ package openstack import ( + "os" "testing" ) +func init() { + // Clear out the openstack env vars so they don't + // affect our tests. 
+ os.Setenv("SDK_REGION", "") + os.Setenv("OS_REGION_NAME", "") +} + func testAccessConfig() *AccessConfig { return &AccessConfig{} } @@ -16,6 +24,38 @@ func TestAccessConfigPrepare_NoRegion_Rackspace(t *testing.T) { } } +func TestAccessConfigRegionWithEmptyEnv(t *testing.T) { + c := testAccessConfig() + c.Prepare(nil) + if c.Region() != "" { + t.Fatalf("Region should be empty") + } +} + +func TestAccessConfigRegionWithSdkRegionEnv(t *testing.T) { + c := testAccessConfig() + c.Prepare(nil) + + expectedRegion := "sdk_region" + os.Setenv("SDK_REGION", expectedRegion) + os.Setenv("OS_REGION_NAME", "") + if c.Region() != expectedRegion { + t.Fatalf("Region should be: %s", expectedRegion) + } +} + +func TestAccessConfigRegionWithOsRegionNameEnv(t *testing.T) { + c := testAccessConfig() + c.Prepare(nil) + + expectedRegion := "os_region_name" + os.Setenv("SDK_REGION", "") + os.Setenv("OS_REGION_NAME", expectedRegion) + if c.Region() != expectedRegion { + t.Fatalf("Region should be: %s", expectedRegion) + } +} + func TestAccessConfigPrepare_NoRegion_PrivateCloud(t *testing.T) { c := testAccessConfig() c.Provider = "http://some-keystone-server:5000/v2.0" From 33b4f5cc0a577ee8ac351e855e2713298f10c054 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 15 Apr 2015 11:49:29 -0700 Subject: [PATCH 049/956] Check for EBS being nil before assigning it --- builder/amazon/chroot/step_register_ami.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 88da65e9f..25b87592a 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -25,7 +25,11 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { for i, device := range image.BlockDeviceMappings { newDevice := device if newDevice.DeviceName == image.RootDeviceName { - newDevice.EBS.SnapshotID = &snapshotId + if newDevice.EBS != nil { + newDevice.EBS.SnapshotID = &snapshotId + } else { + newDevice.EBS = &ec2.EBSBlockDevice{SnapshotID: &snapshotId} + } } blockDevices[i] = newDevice From 65a9347b1ead3084596a076f9a3de4e6dc44f49b Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 15 Apr 2015 12:13:06 -0700 Subject: [PATCH 050/956] Fix potential nil pointer errors in ported code This commit adds extra nil checks for some pointers which were not necessary when using goamz --- builder/amazon/common/ssh.go | 6 +++--- builder/amazon/common/step_run_source_instance.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index 965a26994..25c71a243 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -17,13 +17,13 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (st for j := 0; j < 2; j++ { var host string i := state.Get("instance").(*ec2.Instance) - if *i.VPCID != "" { - if *i.PublicIPAddress != "" && !private { + if i.VPCID != nil && *i.VPCID != "" { + if i.PublicIPAddress != nil && *i.PublicIPAddress != "" && !private { host = *i.PublicIPAddress } else { host = *i.PrivateIPAddress } - } else if *i.PublicDNSName != "" { + } else if i.PublicDNSName != nil && *i.PublicDNSName != "" { host = *i.PublicDNSName } diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 644734619..702267c97 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ 
b/builder/amazon/common/step_run_source_instance.go @@ -261,15 +261,15 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } if s.Debug { - if *s.instance.PublicDNSName != "" { + if s.instance.PublicDNSName != nil && *s.instance.PublicDNSName != "" { ui.Message(fmt.Sprintf("Public DNS: %s", *s.instance.PublicDNSName)) } - if *s.instance.PublicIPAddress != "" { + if s.instance.PublicIPAddress != nil && *s.instance.PublicIPAddress != "" { ui.Message(fmt.Sprintf("Public IP: %s", *s.instance.PublicIPAddress)) } - if *s.instance.PrivateIPAddress != "" { + if s.instance.PrivateIPAddress != nil && *s.instance.PrivateIPAddress != "" { ui.Message(fmt.Sprintf("Private IP: %s", *s.instance.PrivateIPAddress)) } } From 6a3bf9d87e563b6960586427d7cfd0d7cb0baa17 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 15 Apr 2015 12:18:33 -0700 Subject: [PATCH 051/956] Remove Go 1.2 from the Travis build --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b0de812ea..d67d52bfa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,6 @@ sudo: false language: go go: - - 1.2 - 1.3 - 1.4 - tip From 344c741642c930da821694a0b0f94b59c08b4275 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Wed, 15 Apr 2015 12:53:57 -0700 Subject: [PATCH 052/956] command/push: allow specifying a -name param for push target --- command/push.go | 15 ++++++-- command/push_test.go | 36 +++++++++++++++++++ .../docs/command-line/push.html.markdown | 3 ++ 3 files changed, 52 insertions(+), 2 deletions(-) diff --git a/command/push.go b/command/push.go index 74915de3f..32edfb3ab 100644 --- a/command/push.go +++ b/command/push.go @@ -34,6 +34,7 @@ type pushUploadFn func( func (c *PushCommand) Run(args []string) int { var token string var message string + var name string var create bool f := flag.NewFlagSet("push", flag.ContinueOnError) @@ -41,6 +42,7 @@ func (c *PushCommand) Run(args []string) int { f.StringVar(&token, "token", "", "token") f.StringVar(&message, "m", "", "message") f.StringVar(&message, "message", "", "message") + f.StringVar(&name, "name", "", "name") f.BoolVar(&create, "create", false, "create (deprecated)") if err := f.Parse(args); err != nil { return 1 @@ -65,11 +67,17 @@ func (c *PushCommand) Run(args []string) int { return 1 } + // If we didn't pass name from the CLI, use the template + if name == "" { + name = tpl.Push.Name + } + // Validate some things - if tpl.Push.Name == "" { + if name == "" { c.Ui.Error(fmt.Sprintf( "The 'push' section must be specified in the template with\n" + - "at least the 'name' option set.")) + "at least the 'name' option set. Alternatively, you can pass the\n" + + "name parameter from the CLI.")) return 1 } @@ -245,6 +253,9 @@ Options: -m, -message= A message to identify the purpose or changes in this Packer template much like a VCS commit message + -name= The destination build in Atlas. This is in a format + "username/name". 
+ -token= The access token to use to when uploading ` diff --git a/command/push_test.go b/command/push_test.go index 36e6d11c0..322637049 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -120,6 +120,42 @@ func TestPush_noName(t *testing.T) { } } +func TestPush_cliName(t *testing.T) { + var actual []string + var actualOpts *uploadOpts + uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) { + actual = testArchive(t, r) + actualOpts = opts + + doneCh := make(chan struct{}) + close(doneCh) + return doneCh, nil, nil + } + + c := &PushCommand{ + Meta: testMeta(t), + uploadFn: uploadFn, + } + + args := []string{ + "-name=foo/bar", + filepath.Join(testFixture("push-no-name"), "template.json"), + } + + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + expected := []string{ + archiveTemplateEntry, + "template.json", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + func TestPush_uploadError(t *testing.T) { uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) { return nil, nil, fmt.Errorf("bad") diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index be4040527..5833b917f 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -33,6 +33,9 @@ must be completed within the template. service such as Atlas. This can also be specified within the push configuration in the template. +* `-name` - The name of the build in the service. This typically + looks like `hashicorp/precise64`. + ## Examples Push a Packer template: From 03436a37459e10a070464703c42bdc8bb4931147 Mon Sep 17 00:00:00 2001 From: Dan Schaffer Date: Thu, 16 Apr 2015 12:55:59 -0400 Subject: [PATCH 053/956] builder/amazon/chroot: fix no attachments on volume error. This adds retry logic to the amazon/chroot builder. The builder intermittently fails when ec2conn shows the volume in the attached state but calling Volumes[0].Attachments return an empty array. The fix adds a retry logic for when Volumes[0].Attachments has len 0 sleep for 2 seconds and retry up to 30 times. When the Volumes[0].Attachments is empty I find within 5 seconds the Volumes[0].Attachments contains the correct value. 
The issue is reported in: https://github.com/mitchellh/packer/issues/1854 --- builder/amazon/chroot/step_attach_volume.go | 29 ++++++++++++++------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/builder/amazon/chroot/step_attach_volume.go b/builder/amazon/chroot/step_attach_volume.go index e67479550..ec3facc10 100644 --- a/builder/amazon/chroot/step_attach_volume.go +++ b/builder/amazon/chroot/step_attach_volume.go @@ -8,6 +8,7 @@ import ( awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" "strings" + "time" ) // StepAttachVolume attaches the previously created volume to an @@ -50,17 +51,25 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "attached", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.Volumes([]string{volumeId}, ec2.NewFilter()) - if err != nil { - return nil, "", err + var attempts = 0 + for attempts < 30 { + resp, err := ec2conn.Volumes([]string{volumeId}, ec2.NewFilter()) + if err != nil { + return nil, "", err + } + if len(resp.Volumes[0].Attachments) > 0 { + a := resp.Volumes[0].Attachments[0] + return a, a.Status, nil + } + // When Attachment on volume is not present sleep for 2s and retry + attempts += 1 + ui.Say( + fmt.Sprintf("Warning volume %s show no attachments, Attempt %d/30, Sleeping for 2s and will retry.", + volumeId, attempts)) + time.Sleep(time.Duration(2) * time.Second) } - - if len(resp.Volumes[0].Attachments) == 0 { - return nil, "", errors.New("No attachments on volume.") - } - - a := resp.Volumes[0].Attachments[0] - return a, a.Status, nil + // Attachment on volume is not present after all attempts + return nil, "", errors.New("No attachments on volume.") }, } From 982934dfb99b43fe5c2dc0d880344275bc898646 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 17 Apr 2015 15:12:39 -0400 Subject: [PATCH 054/956] Use new Google API and OAuth libs, add UserAgent string --- builder/googlecompute/driver_gce.go | 53 +++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go index 13d765ce1..b9ed4693e 100644 --- a/builder/googlecompute/driver_gce.go +++ b/builder/googlecompute/driver_gce.go @@ -4,16 +4,15 @@ import ( "fmt" "log" "net/http" + "runtime" "time" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/mitchellh/packer/packer" - // oauth2 "github.com/rasa/oauth2-fork-b3f9a68" - "github.com/rasa/oauth2-fork-b3f9a68" - - // oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google" - "github.com/rasa/oauth2-fork-b3f9a68/google" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/api/compute/v1" ) // driverGCE is a Driver implementation that actually talks to GCE. 
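For reference, the authentication logic this patch introduces in NewDriverGCE (next hunk) — a service-account JWT flow when a private key is supplied, otherwise the GCE metadata server — can also be expressed as a single oauth2.TokenSource handed to oauth2.NewClient, which wraps the source in an oauth2.Transport internally. A minimal standalone sketch, not part of the patch: the helper name tokenClient is invented here, while the golang.org/x/oauth2 calls are the same ones the patch uses.

package gceauth

import (
	"net/http"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
)

// tokenClient returns an authenticated HTTP client: a JWT (service
// account) flow when a private key is supplied, otherwise a token from
// the GCE metadata server for the default service account.
func tokenClient(email, privateKey string, scopes []string) *http.Client {
	var src oauth2.TokenSource
	if privateKey != "" {
		conf := &jwt.Config{
			Email:      email,
			PrivateKey: []byte(privateKey),
			Scopes:     scopes,
			TokenURL:   "https://accounts.google.com/o/oauth2/token",
		}
		src = conf.TokenSource(oauth2.NoContext)
	} else {
		src = google.ComputeTokenSource("")
	}
	// NewClient wraps src in an oauth2.Transport for us, equivalent to
	// the explicit &http.Client{Transport: &oauth2.Transport{...}}
	// construction in the next hunk.
	return oauth2.NewClient(oauth2.NoContext, src)
}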
@@ -27,9 +26,10 @@ type driverGCE struct { var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"} func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) { - var f *oauth2.Options var err error + var client *http.Client + // Auth with AccountFile first if provided if a.PrivateKey != "" { log.Printf("[INFO] Requesting Google token via AccountFile...") @@ -37,22 +37,45 @@ func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) { log.Printf("[INFO] -- Scopes: %s", DriverScopes) log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey)) - f, err = oauth2.New( - oauth2.JWTClient(a.ClientEmail, []byte(a.PrivateKey)), - oauth2.Scope(DriverScopes...), - google.JWTEndpoint()) + conf := jwt.Config{ + Email: a.ClientEmail, + PrivateKey: []byte(a.PrivateKey), + Scopes: DriverScopes, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of + // your service account. + client = conf.Client(oauth2.NoContext) } else { log.Printf("[INFO] Requesting Google token via GCE Service Role...") - - f, err = oauth2.New(google.ComputeEngineAccount("")) + client = &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. + Source: google.ComputeTokenSource(""), + }, + } } if err != nil { return nil, err } - log.Printf("[INFO] Instantiating GCE client using...") - service, err := compute.New(&http.Client{Transport: f.NewTransport()}) + log.Printf("[INFO] Instantiating GCE client...") + service, err := compute.New(client) + // Set UserAgent + versionString := "0.0.0" + // TODO(dcunnin): Use Packer's version code from version.go + // versionString := main.Version + // if main.VersionPrerelease != "" { + // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) + // } + service.UserAgent = fmt.Sprintf( + "(%s %s) Packer/%s", runtime.GOOS, runtime.GOARCH, versionString) + if err != nil { return nil, err } From fd5f4c61ae419f3abe324876a532f011370f1e69 Mon Sep 17 00:00:00 2001 From: Mitch Garnaat Date: Fri, 17 Apr 2015 12:35:26 -0700 Subject: [PATCH 055/956] Added a call to grep for path in /proc/mounts before attempting to umount. If path is not there, it is already unmounted. --- builder/amazon/chroot/step_mount_extra.go | 30 +++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/builder/amazon/chroot/step_mount_extra.go b/builder/amazon/chroot/step_mount_extra.go index d589d6c74..aa63b4b61 100644 --- a/builder/amazon/chroot/step_mount_extra.go +++ b/builder/amazon/chroot/step_mount_extra.go @@ -6,6 +6,8 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" "os" + "os/exec" + "syscall" ) // StepMountExtra mounts the attached device. 
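The hunk below implements the unmount check by shelling out to grep, which matches the path anywhere on a /proc/mounts line (including as a device name or as a prefix of a longer mount point); one reason to shell out as the patch does is that the grep then runs through the same wrappedCommand wrapper (e.g. sudo) as the umount itself. For comparison, the same check can be done natively by parsing /proc/mounts and comparing the mount-point field exactly. A sketch only — isMounted is a hypothetical helper, not code from this patch; it assumes the standard /proc/mounts layout and ignores the octal escaping (\040) that mount points containing spaces would need.

package mountcheck

import (
	"bufio"
	"os"
	"strings"
)

// isMounted reports whether path is currently listed as a mount point
// in /proc/mounts.
func isMounted(path string) (bool, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return false, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Field layout: device mountpoint fstype options dump pass
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 2 && fields[1] == path {
			return true, nil
		}
	}
	return false, scanner.Err()
}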
@@ -90,13 +92,37 @@ func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error { var path string lastIndex := len(s.mounts) - 1 path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex] + + grepCommand, err := wrappedCommand(fmt.Sprintf("grep %s /proc/mounts", path)) + if err != nil { + return fmt.Errorf("Error creating grep command: %s", err) + } + + // Before attempting to unmount, + // check to see if path is already unmounted + stderr := new(bytes.Buffer) + cmd := ShellCommand(grepCommand) + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitStatus := status.ExitStatus() + if exitStatus == 1 { + // path has already been unmounted + // just skip this path + continue + } + } + } + } + unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path)) if err != nil { return fmt.Errorf("Error creating unmount command: %s", err) } - stderr := new(bytes.Buffer) - cmd := ShellCommand(unmountCommand) + stderr = new(bytes.Buffer) + cmd = ShellCommand(unmountCommand) cmd.Stderr = stderr if err := cmd.Run(); err != nil { return fmt.Errorf( From 09f379a928aaea8821e0ebe2e28439c8694d4392 Mon Sep 17 00:00:00 2001 From: FGtatsuro Date: Sat, 18 Apr 2015 13:12:28 +0900 Subject: [PATCH 056/956] Support force option for docker-tag. --- builder/docker/driver.go | 2 +- builder/docker/driver_docker.go | 10 ++++- builder/docker/driver_mock.go | 4 +- post-processor/docker-tag/post-processor.go | 3 +- .../docker-tag/post-processor_test.go | 43 +++++++++++++++++++ 5 files changed, 57 insertions(+), 5 deletions(-) diff --git a/builder/docker/driver.go b/builder/docker/driver.go index 85b87b1d0..bc95b6c01 100644 --- a/builder/docker/driver.go +++ b/builder/docker/driver.go @@ -44,7 +44,7 @@ type Driver interface { StopContainer(id string) error // TagImage tags the image with the given ID - TagImage(id string, repo string) error + TagImage(id string, repo string, force bool) error // Verify verifies that the driver can run Verify() error diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index f724e37ec..d7840bba1 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -235,9 +235,15 @@ func (d *DockerDriver) StopContainer(id string) error { return exec.Command("docker", "rm", id).Run() } -func (d *DockerDriver) TagImage(id string, repo string) error { +func (d *DockerDriver) TagImage(id string, repo string, force bool) error { + args := []string{"tag"} + if force { + args = append(args, "-f") + } + args = append(args, id, repo) + var stderr bytes.Buffer - cmd := exec.Command("docker", "tag", id, repo) + cmd := exec.Command("docker", args...) 
cmd.Stderr = &stderr if err := cmd.Start(); err != nil { diff --git a/builder/docker/driver_mock.go b/builder/docker/driver_mock.go index 549e79611..01e19b543 100644 --- a/builder/docker/driver_mock.go +++ b/builder/docker/driver_mock.go @@ -44,6 +44,7 @@ type MockDriver struct { TagImageCalled bool TagImageImageId string TagImageRepo string + TagImageForce bool TagImageErr error ExportReader io.Reader @@ -151,10 +152,11 @@ func (d *MockDriver) StopContainer(id string) error { return d.StopError } -func (d *MockDriver) TagImage(id string, repo string) error { +func (d *MockDriver) TagImage(id string, repo string, force bool) error { d.TagImageCalled = true d.TagImageImageId = id d.TagImageRepo = repo + d.TagImageForce = force return d.TagImageErr } diff --git a/post-processor/docker-tag/post-processor.go b/post-processor/docker-tag/post-processor.go index d68b48e4c..850e0e8d0 100644 --- a/post-processor/docker-tag/post-processor.go +++ b/post-processor/docker-tag/post-processor.go @@ -15,6 +15,7 @@ type Config struct { Repository string `mapstructure:"repository"` Tag string `mapstructure:"tag"` + Force bool tpl *packer.ConfigTemplate } @@ -87,7 +88,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message("Tagging image: " + artifact.Id()) ui.Message("Repository: " + importRepo) - err := driver.TagImage(artifact.Id(), importRepo) + err := driver.TagImage(artifact.Id(), importRepo, p.config.Force) if err != nil { return nil, false, err } diff --git a/post-processor/docker-tag/post-processor_test.go b/post-processor/docker-tag/post-processor_test.go index 925419a10..f09b6f61f 100644 --- a/post-processor/docker-tag/post-processor_test.go +++ b/post-processor/docker-tag/post-processor_test.go @@ -69,4 +69,47 @@ func TestPostProcessor_PostProcess(t *testing.T) { if driver.TagImageRepo != "foo:bar" { t.Fatal("bad repo") } + if driver.TagImageForce { + t.Fatal("bad force. 
force=false in default") + } +} + +func TestPostProcessor_PostProcess_Force(t *testing.T) { + driver := &docker.MockDriver{} + p := &PostProcessor{Driver: driver} + config := testConfig() + config["force"] = true + _, err := common.DecodeConfig(&p.config, config) + if err != nil { + t.Fatalf("err %s", err) + } + + artifact := &packer.MockArtifact{ + BuilderIdValue: dockerimport.BuilderId, + IdValue: "1234567890abcdef", + } + + result, keep, err := p.PostProcess(testUi(), artifact) + if _, ok := result.(packer.Artifact); !ok { + t.Fatal("should be instance of Artifact") + } + if !keep { + t.Fatal("should keep") + } + if err != nil { + t.Fatalf("err: %s", err) + } + + if !driver.TagImageCalled { + t.Fatal("should call TagImage") + } + if driver.TagImageImageId != "1234567890abcdef" { + t.Fatal("bad image id") + } + if driver.TagImageRepo != "foo:bar" { + t.Fatal("bad repo") + } + if !driver.TagImageForce { + t.Fatal("bad force") + } } From 2c8ddc384b1a5d2637850d17a2ed1710d50f1a36 Mon Sep 17 00:00:00 2001 From: Kevin Fishner Date: Mon, 20 Apr 2015 15:55:16 -0700 Subject: [PATCH 057/956] add packer and the hc ecosystem --- .../assets/images/docs/atlas-workflow.png | Bin 0 -> 146262 bytes .../source/assets/stylesheets/_styles.scss | 4 +++ .../intro/hashicorp-ecosystem.html.markdown | 32 ++++++++++++++++++ website/source/intro/platforms.html.markdown | 4 +-- website/source/layouts/intro.erb | 1 + 5 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 website/source/assets/images/docs/atlas-workflow.png create mode 100644 website/source/intro/hashicorp-ecosystem.html.markdown diff --git a/website/source/assets/images/docs/atlas-workflow.png b/website/source/assets/images/docs/atlas-workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..e519ee00425fbf6095bb13045df6b24be318289c GIT binary patch literal 146262 zcmeFZWmr^e`vb*dB0Tk$-lB5 zwx0ipn&Z5t%ITEm=;+-P-li*_PlLj~8H0kwpRAGYi1EsV6h4DKXQfJIR!!|6P2=Pw zsv?5-v26;Q2G}pxh|Y|O&XjJtL!z2I|6qVT$gBO0?{Y(bZcI zq&`{Q0qMQ_3$cGWn~S^404k_onr-J2MB4N7fZ=CS!-VG}OO}4n37vd!Gg}wqO3iL~ zgJ`omWya;hClj+?)9icp?xVsX6UUuy#^l$TYB#tTn^{*We<%&mwQHK{r{6yKMZd-vl!n~eR9 z=`wXH?;oBYd>}D;H>H~=-haw31D}vLqmG5U+RJ(J+N~p9tq}pb+NY;IeX_`C9~x%N zhXkpWTiK^LJ(4_R)ozH zC*L3QFi8daMbIjgD4&zUgkK?X;|Lq9lw_P78=oHj36y^k8zAG$*WR#4iI@3;y>e7o zofPY5be|teKKdyjdA4(|qW^3jzY$j)B$cg;X+6N{(jEP&8^6SycLRWne33cl-_QH+ zD`vO-_U>1sVPO9vx?G*g2S*yaeoa&0+?%os2dF&(F>!b}^cegqTeWwbhP}w4ly3Xu zF{rumlrjCfQ<3A7hZ=jIcxY))n|&M#bPOlA@8N}6U)}dtw+VK0u-xcJ&O>+iE%#IA zmiy4{3!N_Kj-K6%#~!IzQjR4*k**|S{O8grjxpF*hE`Dzi@N`t15< zqkmKZH$7;-WBv%dx8FW99DH;KbMooW>i@WaqNuUACye4s$fYaiKH^;h00VkkIQDpb z{5sx(j8z)N;BAj4wj^4!6dGkaq_zGAU`UF0KEeO(@Sk}8_vvem9Chs=$+s=Hm{CV& zyKocmB0TP!|7OLHBH#(bb?miZz~|Z>;V0xkZbFX9EGNs&Ou-LSfr`+`4|E)I_<2t8 z7eBNhsZY80SY*mhc3)jecsPHzshKbei7_Q(0rg)%>-viK91|a*){S9%%Ay{$&uzXPp3H=wa;m(RMNyM^8)zso-Y#M)Ym{bTj9$HI@^;ah= z%bM#=c4V5Bme)og*uQ6pwqfFz#Yx5EpM@`oowiXvk?hlWX?Se90(o_0bNXn_$$@TH zt0Pj?09^@yyO)p5`B*ECAfkAxN?l{q@2QnZ2S1xO*qYl}rC+QUAb)6azjXxu>jwuGg2LhFuYtFL{T&4_^Rc z%K@k3&c*Itstf1t`P`6c`i*HU=^+)7%7j|6cn|LM9(b!4{HK!9(_*lo6X^h-KJ zj{%&w-x{$y+39wn@Uk2RvD}`iKQShZ0eipL^pl|??2g1fY8Amea}dK2H{H;Rh<=guoFw8oBIo0==6ql7~h{0^GlJ8{T}#$8nhP zqoQ~;2{Xs}3vC9r5Vn*~{Ji-3mwbPK0mk!pZT>Bro4Ul>7`K;D{&Th0|3$Mi1Iv+y zZvw~Pq!&2u+#|MDB2}@%@em&S!TB$ps5u zYgX_g(Cy@O9?Vo7P|dr1h$IGPOk9tMG5@6#gQ>Ow5Vkhv2F@Qke?K{jt#)JbIj*qs z+3ys$7HPI+6lP__d^e%}OM+xVblZ|5t4n?$20FKE`A0{4#TEAD6TL^1%#tMn2mg8HMZnoKO zm}UiLpDGitetUtFAN7|eikX~W=LkJIq&RLyx`d@vrg||O8v7D{EAYq3DQNUHpeDNT 
[146262 bytes of base85-encoded binary data for website/source/assets/images/docs/atlas-workflow.png omitted]
zFWuzhXMEnGh(=RnPV?Cq?w?ea=e&O`6%Od-$#4W;ud+`$hXT8$df2M;x=Alcdv0JM zl-9p)C6PJgd9vPzja6Y=ILs7P7gR*ty(TB?{lW1k>zIIn&l1!88!#yFLK6G7Q?qbO z)Dl)D11D-t9|fqGqQT=PI}$Y0N9^U^0Jf@`2KpRCaXVS%dpJ{lfY|9kum8G*%pBLc z=!_TooB{twWzQhc-ZD4Ta^nh1eW;*c6;<`{_lsRIzhceaZ~3@(AGDUp$u(ERt0~H zFe@K;Il9uek*kngd3N3a;2c;l$q1B6DZC#S_t>)`tr2n&75BBSJA00^v>Pn}l>mVu zvgLShzI!BV2UKte1P+8Mhz5@g9IYh}>oQkKn9q5Dx>_REsFQ~;UNlWN?9&wQ^{9`~ z7rMbJ-#?i7?~BWrZ`sJ!Jtpn2+P=ApbVFHM#XZo`09ZB+=rl6%S#62Yc zn4>l%(kk~A;UmBRDLT7JbD*;*CarFq#IfvY6{(eZ_#LGlDFwsvrx;Xq@a*Ut*4-HP zrRqD3s>l#Ye(c)P+lCYpaPLnlbMxi1k0q*#iwb|P4DF}5hMKis+StBUBdec_zF_Y&MqE`$Q0b$ePw$A5@x})3ao$AS z{8A@bu#}APv7h37=bI0zb`b%~ElKd+yrUQgU=0>rHOLZFCXv((>V9N@-&<>X&2opI z6F;nJnfgV;?Wb6pN`6$zPI$^%VgbMZL|!FhiN~0-5L}5eJVo$lzfDpW%I(U2RM!`4 z+qAEVLwkTaMN#kEZaoBUFOtrBk(ka@^qMa z-?xMyZ^Pl5_HPtXAG{$yg}vKG;4Kdd6jK^ha$jE`O4z>B-_Pg(tY6V&4G<90ylfPTuwcx?`;khQdhx6X@UrP)qQwbe}Ia97H@@j08G03|Uz4v0*$ zD_alFtk_Ie7PcGwvyvX;HgfZTm;LNV_6Hju?)$@KzqB!l{oPHd zhV?7k%&F)bn?uz7fksn<#ygURTz;M38PN6Pkk$ z^~0Bs&-u<`3-1_+r2>>mSk5D@t#|LNMA_;`i-j(FYk(??fdiX~3}eF<@)-~f^EoG< zCiQy&9y;`E!8IKLp0Q@;t7!sMVV9EZT34JUhY8TRYFfQuN2sh-yX*B(Q`G~PuE4T( zz*kI_A-iurOK+>+|EoGfBMx=AYnjc6Lu#zhTh9|ApGr+ukz2J}%vo-cO$C>eFCM>b zcmJz5>~~zshVfE=Zj@%=WGR?jDjlmR@!_tE0W^83rJ!(%fUNixhxIx`0sU&dn5 zIPBS)Dq{v^33DrxE5>6QE2 z6;}M#HuRrR^6E1SU{Wp|D2_aqj>I-hX2S#!(WigGjm?Rwo$MDrEA>*|0OhcGHQ@xy zI>9MmIOeg9uYmE#&&7d~j?E1#d``&*Su0uwSto6)!HKa|ZAZaS7J`v}=7)$o-ix_y zEH)hfPju(`2yMP!NDmFl-j_Ao&PR;>jHH)V=&h%cR=$S+C-H*4?rBi_-h8if4T04U zMV!`u*F0V2&+j6IdDLq^2BoE?LWZkY@=T zUq_QAmX3OZD1naW;5N~TgCCba)KV`-JI&-1t4DbWndo~LL*!L2oSfPO3}lUrV6VV$ zT39-ZS|9~dq-0f=10O>o0()_vP3>h8G^Nxi$518fT<#SB5^4q+Z_EbuCcwQiQTMjf zcc`vcb;qZZ>8=D_4{9$5nRjW@CjD1Tt`F47ACX@}kn`f-&WZ-xvrIMJjmf+Oj9|1b zcD6(q_VdFYarWQ&2eg%jCf=Q71g^g0RdzgaMQADS;xm$v+p8&K*=^cUHK9@yC1EFi z%7?`Ee#Ac2`YWydFYbA*@umCY6-J7}E+Eu;qnG-S>!3C_?(}hHwpO@;_d!wLGk& zv6Zx}2+#>sD2KyC#}qAh9;^FcNW^%25m>%my}{Re2%9G|(R@Li{U(sn!&^r47y0pW#)fK# z`WyC%gv#t|$EPic&60X&zHy}GKB@#WoUw9310ayQ<)yt`be~95O4wxQlN9lJeZ=}e zZa|XR;*R;Wl3uGA9$3W(taG89SB6ByRVZG>7u~gpg^ZLow}l(zOv4T6*(CP9L|^l< z8UYm9I4z3H(pWjRQMnMBSf2Joj*V#Xii9YvPNeqb)?~UlE?M?XbC@Vw`>meP|6;Uo zRuB`cL(u~8!($=&#UUwNzp45k@_`SD=_V_;{po!hb!dt?fzf-|)azc)ww9d4{04tx zbC+`5l=Au<$KIC-4enyC6AOpuE<2d6hiK!Tu)Zg3+X3AfM#oCt8WdGt6Uu{dXA>B+ zU!3Dl#AZqP!Jg_kmd0h0MapHA*H&ut0BIOAgvokqE{>5kDgl;4#(=O(HPaFY2zkR3 z{E8vG!>_BA+|pEj7SV+4He&!%*t>X{sTAGI;v8M}l*$_NKex*Fb?w>mX#G=Y`ridU z$!iL3E+8+3Uf?NND&pFB%-j~kLZlQRq(4)Da^^<=fPih#JBwe}pEHhXIGHD>HXRCh z=^Vi{M@og<@k0nUt=RD_YnWZ-t6^xtH8!}eVyX?6p(j0L+E~kRExtg?#~Daut8gqY zXL<{mO4Z@!vPInKbZ_nTaJ^0oyzMRlsxjKVs2`JW>O`t$-x^(T6&lf|&}O&x>XoNCR$ zPeH=*E=?EtomQ?O+4>@Lu`^qA@EJ-xr7nFH0^#uyxnGM=l5?xr!OeB_ipvX+tC3(* zDqzM8ZLz*fFr>jg7ClU;8rk`|wLS|d)m;KsgP3W89hG0hf}9JzEgP9{ z5w)u;@2@#Zne%*abzTuZn}ome`hgFYQ4y1KEj`dLpAk4O1B!t5ob1u=0dnk(w|rg0 zc({@tD~ek=2QORMDO3JZs|347gc73nKQqmB2^QOL8e+Kz@#Z$qT1d7iE@NU88-HAU zM%y8?Kgg0Cms2Gf_x$np7fI_NJlzN>23KY~UmgkJPqb5B|Iq>daOwgPZESX>y-a*} z$n|Cv3d73X15;EZ1GUFJ3|9(jOFgZDBmwX-r`?!qz1L@Q)q4Dgj&jFviF>QfYP7tN z7IrvqTa~giOE{35s+PgbM4oE8wn`=6;dRKOEAr@`gSY|H3R%36Rjtz40*(R@^p=*{TaR^qV z%o05v6=^P{v(xf0+vKzRxlskNB^$O{KL!8asyxjg6j%+bnL&7LzNEo&>^^2t>NhM8 zgnEzlTmStI4Q!M^av37`Sp+6xK|*kIFFjh1<1-{olBlvED_chBC4@;-()S@M9E(0a z@DYWKkxa>1^H&lTC0lS`;ZRTb7unf$=%dII|7@nS!I z)#yQ`ws7I1Wws`j+ToFBQdWYS7kTG(%D&K_kK%ACx_W4WdViP0bU(f!Iqxi<#lQs{ z4g1MdkI~E^npjY{g-k~#PFxz-0`6LqnOcvi>U9Xw;_{6qVjf*sDd&(VXn?XhmvC%v z>h%FG;FlinMFSBP#&ZrL`kzz(YTZGC$g?fv*K?k`DjHw1T+~;MOj$rSn(ij461N0+*WSjo@6to`tnVQuwiZbbWu|8{tHJGB3G$3f;1FIUVh91^b zLz6PU0RfVZ$o>Q-y 
z$-tfsL6t2Y^-s(bJCh>oJ3GG`yMf_m^e2gQH5~!^*)Ns^X(Ps&bgb!98=l>65<)d1 z%CVp)RIlaQWB+p3EQ_ByG8y$#dl_dnMC)JvMuq<)ukE`;+BFoL>uv`h@97mdbLXKu zMw_46fLydZ(nQ~-Kii<UnX z1W(*&x`B(S+Bx$jXc4CC{b3O5OMGy2F=F^+kI-C)xUT`MC;>2}`~AXycILNQEF1R? zDp%2(O+c@Cq%+;n>Jknh#u}rWpKorbgd+mzain|{nJ=itZj4PC;;$#BO`T9J6{pT% zlo_X@($)Eqx-2u`qd6=_S>Q3T`%c3lDhWhm1e;V8_*@OwV~tJRh=O=*&)!kvqDdus zV4o`BYi4u%yDna1O!2PL_RPQqs=-7=Wh?Em1*GltkTd?t+K7|^;%u#Tb&_xgSK!V? zRA9OIxG5&i;x+BT=w+JglJmbC0{;Lf^ae=cCtXK^v5u?fhuf&@3ZFE!VA3N!vY1C% zKWD%I^^zISgG?VhSCM-QIE`tS^m%Oj+6CR6g8+z2%>EC0%8UaK;BVv(D|r6ig3Cckgf> zq34lFRdNn=7b>BXOX*U7u1|JhBq#96`jKD`r92~C$CfU0uOgSAZT+FA|JgfjGU&nD zoe5IA0SRw%n@QJ_C+A7a9J|IISmmDvp+>3*W3H&u0_u%mnm|X>!pNDtY z_}f2;w3Dop=y9w#5@&)>I@QmSi_4mc!u)hGdF#~$P@$<|5*bzj7YTou2;aDLPl-m< zVh6_4$r^sRuB;c;bU|fj^n<$8Z@!PeaPc%2UOI#N$r~{-<7b9?Gc^>!)Hbjqqu$>< zNTyo+e<=G3sJN0X+6h5JNRU7vXwcvUcMGn;9fG^NhY*6hdvJHx#@*fB8keTghToa@ zW?p7q=KuHCVzG)#L-(y)b!4A?c5Oq_2NKcew5&$KNW(%lSkznZ+j2}(;=54IvjX5y zgv{*}MbaKZ=YL=88h(LW6R9o%wY5L{{u2UYW6vtgJmXnYrxOSu`DZ zhRk#El;Y_A>Ni#cY*o5&$;<-AzP|Fs>bbr{iQPLC7b&iC7#Na}-%8H~cr)U!uo>A^n!Nh z0FNmvV`K@O%`WVX$_kp=rbi~H0r03+uNkAoBXAQEf1c)}DCA!g^t;vnS6uez_XrOd zwZm4jFm;`gWNEhD;nTnfkg_JX{++?#+x@Lq;o}>EG6<%x)Q*5w`$~$rvm}nCRy>7e zQNDyXEpuzdIVY$HmFT{{aqO0Xxa!5IXtD2wnB$8RJ`FM|ky#m@NpbiG4M@d49}7zR zb^(XA*)nOHsUnx?ATU>3kuZim_yO-QIh-__H9)lh%` z4uww0>GjO%!=f<-#uXuQ^c>gPkh$|~7I-!Nz;o4XWp7ed*-9j%D%-o_3)*TR<>>do z0fXn;7WpJ{?nL0Z{THS&J(FmjXgfD=+P(RSgb~{+ED&X;uF{;xuSToJm5__?&z<=v znbY4DrTQ789NsG}=8iw8htA-iPryGg7;l!r3lljLs-2qUpu67J^B$QsmKJ0-vT%&lECK6okKhJ7mgL4(a8P+|R#sxYcYd4tjLA z_mW*09=pA(qyv(L>Kf11uH$jdBbBltaOkggpGgm-^%ihlvWF@}woW-!tN3KZ@Q`e& ztHzW1v$qib)u#E3i+loGoF6UiafqsRtRBzL>!vWc zR@t@Son*!rDZfhgrZ0%7=SEBn?j2 z+iP|kBPvjT&XT$qv?22n0xp>&{Om@Ztd>u9$z$W|3ui@ut~(osp{+x>TM6R|Bt@pB zRsn{365Niy)6cf=1}mU_K6qX$Qz0fJKVB7ds=mZ>h9t!qE-+C`QAstKXThJ|g6Q6QxE zcN*eza-Zazi`$(X-+AH%n4k8Km?4b9d?d|EL`)!8BQDTyrgI8wQ@fkg>A3RROX|-2 z^|(rAsb;TQeJ=9gxoWxLZlY4&+dAP~JtoH%CXsYd$9cUfTT>>mgHbl2T82LH+MR{V zcKE%?AXeq+z9*KPQIZt@@oo}VGN%y}C@xMFyAFhMw1t z^g*&z#k2#)FU%6E?cXI)js`3L8Cs_G^@t*c3L_*mU%sbEwYXqw<&wA+5f8tCR`WfN zq^KdE@kd%aWsEB})*Y=Yp6>oblA8@AQ=t2n=)-sxAyXm0_j;lE!zEC3t)NQET z=-41Z#zS~0pBFIXb(s`Q5V)q8OGtmh=QS^#%nmp|qAmdi532IaVh})&4yo&q)N$?T zcf8><=+3bmNLV9$i8cx8ouuPThcjWLsjT!(3T5P&&SP(;FUMNV%$8*SHsw0B7B#cCo!9+b z&Vr@Rbk0!yl_;xz(1<>Ft3mgBl~GbjSwFv}yt3CrIeF}sCVtdGDDIVSWXsb!M`pCO zc8Qf{2bdNX*;{sjz;>KVCU*RtCTOa|YDs$+S`n#W_6F`tkLNG!yqGl1xy5{*_sREO zz`Qi=bYyS&mK$~IBMSAj-LoN-@NeeuE7w`Uxup(c!&jVpz!NUusc0me{^m|O$A~rl zJwNWaIUu-1D_|*kO&QGyQ%3GxY88y*8?Q#=Ei@L1T*ilX_kD`u>P{)Mp|8N-nlQ{1+grM>9^1W98WkB<-P{h&uxsV0>4qRwBs=eQi4T>`z95#UIKQ{Omr%zl6F zuWxOMdlh(kl$_EduSa9t(N|54G<&n;^s=AYU%^45aO4GnpkbAWBk3`!ZJ#UNW+T@z$7VlB5*Vqn-VQ$2&`g!Zq-6mdt#$Wd{yE*#|EUk$#a z!B|hM*U7nxp{_BC%FeV{Q%r5~19f9(=rL@z3dLVld_HCm9_*>oPbP>`2F={7kwP%@ zN{@yrc|`~>^%2fnqz@LCI+I4sqRXWcMk{wG>11@?ykV`3sILXCZzF(a&d&ZQnEiw- zMhdU(Dbrhew@atr8Nb0--OVUGdZT1VUgf|Vg2T2bs1T=`?~&*SZr;u@_TD3vg)I7lxEYqG3V0qj!&KBcLR1g(JLLDMa1*mN^m??Lkb?i(kz75u|f$RDx=NRK2NU- z=^1MMp#?D4xUh-QtOemhXo09uII^NitLIN7)~ShuOKzI{m6r=A@{X$~xu7^}W9J*S zx!&B*RYj?GtrB@!{h3uN+#qt~&DPV1_8#tReawRvhc$dQPcSb6z5&FKfpcWR@A(Wq zVwRl|vmefpMkmhH0~2Fb1l7|btr8(;@5W%>2L&#zGkdP!p=Jk{tavjg_kt{etQ7gQ z3*D05dr3vAq>Qn&*_K?zY@eX=-(WfuKD*}HU^_dII!MdQb1uKJERs6>0EUJ!Rg$V- zliPHMiwhQ#K5?Bfl2}hWUCKP#ZFRj#_szOKV=LO*~?)49LDwIynCyD^}lm0KkBYmoQwhb*l zdy!=vrsLDfNsg`Ev#X%PPk~^!%%>@=AYmtF3^`mVk{$ZPSGn};-+UOgQpyjBEh7#K zVpEoqGwqbKjwzUTF&E(YDQNsNM(|me;+Mw^GEbzNi)xKKy~Ui51_sXf+B)*Y!)f-I z==sw*6U!FjyxiHQ8^(VhyJdb9%e<1*O-%791$>3Y_LHI~y2 
z+4h6`%`BZ0wf%TPf68UZu%Y1Hefv|IAdDvqLVk=K`Wm7wBB3;2?ERT9drms1tuj10 zUHP+iy|#==fvl5ib#Gr-RP>PRUvGB5MG?WC`$n-;y^i!4mV)j#;WCh&dPm*C1dEu> z_`?^R9hUHHA}S@qZ~dbTB~f1heR)4O8+Q~v;~lU zsu)X7QUTsyb6-;TJdqjN6(#-&U56U&2m=`UI{Ylklt%fb0g!5pwip8Y^yZG0R~L&& z04j|F5~mZ-s@qp5@^Ks~>sF16YrFxgkz|2MwI$N`JxL;LJ0qvGl7&*Xj8y|!U2o5H zcg3B!i;(A&Ql^~<00E~fN9+O}M-EsCew+#7l2pMX$_6oGFizlGjZG^oqqFBO&$>pA zMRW$_#C;2T&q3&JwMXTx5*TWy#?4-4b3h9~5m`AQ^1qo#fB2YnSR;&vE2=n+PVhSm z*3jR4UXhIL-1Xys2tn;l=<2p$$c*-`Bv{32p+);XU2%}m9EC}&cOcgG{N1>dMj?w? zJCk;x-iI4qiQhUZ1-Rd1I=+D%VzNQD^l-N;RS#P)3Dmtwxnunh2b85M#)c&oqdkVHV7<__fu1kTBBgn)=W<4-*CxvJj&1z!0L){Wx(y-;Lwt@laGEAYQ1r=S=g zD(@iME%x*Mfb^A}3{IKAn+B$0hpW6J#l>7XGHTr-4aok2EyuG^tVTL#Dqv=1%d#}Dt#Yeb9*j)?{4fWzncAAt;AVkC&7`uEX7I{| z4=}shxo>*kJ#fq-j3k|ITQM;zQz5(pN`dOoD|VjSroq(vphBw^!I8F=6cKSHOhczJ z2vhmITjD~EKhK}N*YXe#+nK!u*zl2~CM5X{XvO@tl= z`W9dQZr`t#M2^d$Z(d^3O}A#q+90r&HHI-q3Qf z2{*rEUNC($S$xw{&Bscbkp_YF#iSpd6Zu_0w;?r0Y#zbjM6hQEqxd5wrPyVJ-3E>S z-<{ZhEOsmE=kj9usVIk`R`pQu^a}tpGQmuAOXKV4FmcY|zsUIfk!IZr@qbmvP?J*r z4v*g_B<0XHkH?Z~0A+-auvN3RCy5*07dq78)`GolYkByHsG=8EHbEf;=ur03jE#5M zU@F$0U4@y;xo9|ULK56N$j~gD^ z#T_OX86P-)z)qchp6q|5mp@kOqY#iQKjq$N8eY{uwOz&7GtO@Qk*IEQUM!T7m2UDg z6mt8UIU+)h{q2z^JFj&&`|{C2nAR8n#|t8!Lst%&FYyoUv|oJrJR!0PN$t@#=d&v0 zh!!@S-IgK&sKlDSrf?^9E4^eeg@3>B3d@Jq=2Zf6k1FmUooTljh#8up`C10pGY#&6 z?>QXEgOhj4lN0NWL*?*ex;<;gPm{eEpOqV6E0y+zSuLyAXWhY?Gx+5`Y5z_ibddCZ z=WEJljSV%^M*DxU(n8+BoZCBInQL>l4&O5@1iT@Ihw@e#Xb_1ocgopO_Wrvb@OwBX zM1JZdiW&G}j@jMn`{#gep`lARcf(J~tweg&C|Whu>@8xeLg_|t7wW?>Kvcm~&Ac~i zpW;p5uIl9>SCNkuguQQ-e>7QfZ!$m?$-I znTR?+#n&~iF_89tU4eL9cxcDDW0l30C{EGT0tJB^Q{y>Ssx@xWDM>Aj{AZ0FXbwjdC$x z&6?`39w79o`aF3{v{59gLA8eqcOeFKpX_W|Ayg@}4gr%o-+G`b1txS{sySDPnKUWr z7b!#XN$nfeB{V_k>ygc*&*s#m@CJ6O5~RSAYDhOHvCwD>&oS+HPI5dp**x*3}BJDy; z#I3gXRjxYUm!QXd92MslSt@iUsUy@?GLZi=!~1=GvqJ+CbmMP-Er$JJCip*nM5?aD zo!O+>Y9zBZ`mTlcSzKahmCb^}c7{=es|>0f8C-gVrU|?g@*pd75&Ww2XtqK~PBj^h zDYuA*RfCf{1@b&=-pzNqwK#r`6_1}etmSxyhh7m9u#rv2Q>o_REIuRD_|6`t#wAs8 z)Rappt{=0qeE8i^^to3l=4}QV^tiaK?4|GJQihRBnIOT{L6>ak5(9EPUAiTF+?Q^rFaB$5{}FPa#;yNXGVpI(^NOh4e^R!!mpCsLhsfX^`%M{{L^X-s@}|UvO(86H_JJ?{lgd^G8MEdotW|z+mXRAY zPIyfbej4(5DwBI!PKeRx8xiCkr4HQ#gH$R98kw-c88ru?A@Tn(BvP0=d`n`h%&FFdfy{Bj>&8MC2nw6r_EsQoTE@j0zySLY>1_=)q(UeRB{=AR9=j~5!$ zN9tVLF#H$Q>-Q-D$*fVzT)rza_=?z_w$->5D;8jKFdp|T&JitieJV1F^S$qKmr%s? 
zh+w&hH5-Q-DHioPEtCSs^&R;3g&9#Yjg4rk%-i}dT z^@q}#HcqS!5(cOqtYPgC|2mZl7dyPkr=JBl{xn6<0TB+A_9qE1JOyoT{9{Z1JjnXo z){g35Nd^>-Da}U;^)OXOldd*`3oEdxjmYj3A{AMq`5+N2{Z z>=L!-{BtP3ejB1SijUnuG7avL-@?$%;g{q1l5;EF!EmdHQdC=r7m z5_C_5wlpwZiWbwud6q8!&Rd1FtGt7w2t{P?03C4Yt6DrYlQ^ z3SAvgn=kmpCcr`^;Ebs*1a`&iTN495{Y#nV=c)eJ`2RWKU-@&IQjhfo=4f4+D6>R0 z$v=!5i&hE){cPp(ZCEGwYxNb6fFVWP&)|@@)DD?f-;o0RhXX5fbrWw#m#JHA6rmja$4F{)~}m7$p| zmE1hU%NT{FN)qpvq5SW+86^$PAPq2tJ%3(H{~0qpL#*$UBy>szwe)B(nHDFBG^Bv# z@oLWJTBLO09n2 zAJs}}-v0Uf|8+q01CJrLZ&yaej)a}C5R!#T5?gx@aT53X3;cTd{qua1`a>tOe?34Q zs!AcnHGa)q?|^bK`StjJ|34x8Fn-(5LK9QA+`*b!;|_jyy_bNMC%{!i?r`jhJrXGA zZI5d=%dV@Awr=7j{b$M9eC`|Uf3{Jff;k|(A`~tm=&JE`87*C~%==HyA@I(Hy7yf${|MCih9U{LftKA5h$ON)a zO7(+=4p^S&r?&Hdp2WX=_eVjMfo^Ua@;OE*Wy8+O z@SpR9zr6F$ACTVzx=(|{C(G%S;)*`XDX%t~Y*T;t2oc=03!s4vVR5iVn?M1DMDU&` zP*&$sUFq1^Gz>6$TdR8V6sT&X5=j2~oni`p(d(y8Itidl&WN;2zk{|g#F{_bxBv02 zfBqOyfh5c)2On9u7Zzv@)@QTWxb`lg73FF3;+r&-XPjMwdq>7_ZP|3x*ti7x!1c2hk^c z-eYqT0`0_e1o>J9#Q_WZRqXyK7-!pAFWLJ$^M)XRvk)~vrg}!1=Jt0u%x8qjZFskd z;`mjMqer}MEvz8+FyQCcq=PK-gsMNQD*tky{OzKRDKttwm6 zRGQL=Mc=L?;5%$`Poq}{mJjxF=00;8F+1YAY6wELc#E7{n?icqWf$7oD3`bU5(hM# z*D^I>F0Yo>@r5e(Sl?%<6S^YY>D>}V`NhZ@tI|EYOp(?N(r_9DS%igFTYY@KO~2H> z?7@udCubx?v+4H`n!EBpaJXI8_VucoluV_0SEM^zdBdH~_1YY?dc~=>wp%rW@N)N) zR`O}|W|E2YhW-IKlqXIHAd)A0(NK}q{vF`5BI@YTEN#V_^D)ERxL~GYUi~DG&vQUh zo8;W2^vUFhF8VVfUm#OQ1##KRH*;JH!Tii58ecUt_? z{|v{2mTUfKW0(OSQfM(3tuk_Pk?ZZn=&fs1$*udHX=UXT52EH0)Mjvn_nt&*!E5nI zW;MkFL1ibTJL=$ezcHg9@Q}l9mvvK5 zCDF{5dp8PQ?|rY+9?!=jYsXu&9XWTXAYxy)*-9r(FBM~XufHdcycn3^hc=1ExE96!U|M#%+cK$ zo%#Z62Sm;>i-{@(2V8fuW%5bI~Pb3U0{8O zldmZgXNH?3@JCPXF56ypN_Rk==vpJ?vss4|gxeoYvH#nbSZLc6<}o6;4yX183+erU!ZZLyUH+-p(0p1b_>B*wHlPA*k_D637IgI3~ zW(YEC1qWWCrIfhYIa5ZaK_h+Kg7zw*&|(aii(PFqo+$NIpR`jxL@6n# z(@O2#Jf{=ooT=iP6P>R)e0_3D<6P#_>79a~lQdT#mC}7aXL-mq>dGMlRf(`^A`Eo& zNJ<^z9oVRW+s5m~CMD2lBQAeAG^-l0g@P74zqk<2=Q=2=V!O0;6wK@vBP<^&kx6E) zk*^_Rw2^?NjgH_-XJ2!7hkf$+KpxEtDffr(As0{|7DBx$!{fPmw6eEh&XIYEK2jgKdb<$wMITCD0z&f(0f%@_Axe|Gt9mNlZ`eZb8N|+<@R)y9a?zjgMe`f z&JkQ=x5|n3M+n(XZ_Q%O*zQaZtUYLI&nHonbEOKmNXu*9EQ2j6Xd8xQNAF|(Z`Ge} z#!tO-n0=Y5$gGsnGKyXOD9qYfz(yG#JzjWyd%57woYcr2?R5di=9q$4^fsXRbqZcw zkJs!{+2l6_hlNXbU#D}HJMGq5ouP{ogVe5xm*ya#KjYpF;kC(Xl>mRHG>wU?HV4yP z^7BIV%(1(0+~!9DGyN5+;NgK?%IZC%RNgy2oa&o+_!bCJ@-k`boj}$a<&WncA6hPj zR}@u1#%HuxWScKK?|P_#*Xy#W8LPcTI8tFITmvmYDBbpJXb<`n`RBcoR5ZFGvyCLC z(O_3I)Sb^SH+cex3)=m@y=iKxi?+l=&1_f6lWkm$?2g#z=p*v7QbUA0Y0aGpHS{Nq zb>r#hCs`yk?5}K>gw~T}Soe_s*D##U5ayy(fmUvwi^jn1VpUL~NoY#xDlEPBenOO` zs0LLov5W0ty)$vEkKFB?i-7Z5e$VkR&X*kDYg#s04X>?$?sDBXy4GtxQK~1kQYpYFZb=Ae*AAH10tWy%l7nD%lFUqp|Avz}Epo_N^hap+Fv zT)x#xollriZMQtdgQYWq*(~a4NazolTuSHtRQ|3%JDfhe40s1yXKPPzc5)^1(WyHj zojpZd3G>Ar=Zm^jUdq#32U!1#b#juID2Fh)RL^2}zJYbw#%_khowUKV?cw7)W_4&P zr7jy3rqtu{8)HiNwWCgaK(|-UP|iD}docwVR&fN^Y~s<9yw?W!Dz>`Mzc)>AsyDEQ zWnZY`pA*!-XEQ`!+s?!HrWQML8SwzvC2mR53l|RgOtaHE4j*OSk9X=ggrmj6U6Ifk z(nCt(N#7_);0>qU0tnnT4pI)Maa6G$9uqyeSE}HLTZ8l&Ig*$nB~$?RrHb>?QaS#B z{TI+b9$mNa6$h+Cu&!!D)9qo)f^P2Nv5HM;PFzB+v@{1Yr;jN-t1skyOCcnl(57&8 z4H^WW;6VOcO#0W8D>|PWz*U)798R;1nG!h-hU4A|aogxxY@#{w*9Z<*AMG%X)`koU zDjDSFB5OZyIN1q+Y@ceNi{qlz)RdP~%Uc%7dTB@A2C~kIS#Hu|R45{yRQYPnkPAW? 
z64Hgle7q+HyG_6Z0Y9~8kS)50?h`4egRKQw=F3ZO)zFq0^Ge6t;d`V=J zfz!?sY!97^nKX_{8KO>pQ~?<6x-TBNqX|{pYqH*+`g$EzU;BGP z{dHJvqJhxOd_q95>gn8gnrO0lHMk;9@@rm9eS!4XDE5zNyP^K&hbkH!W7ESEaaaiv83KF7#L%A{G{)|sEkY3-)V0XNMnbB2UmIxTqx63!-D zjNo%dipm#m%Pq)1g4*F6rJO zz}`)N`rb~tFRq9=R-bNg9*3IV_nP|rWx)(@rnBrDP;4k?i{p~4wA@l3Z-aamGz9m2 zI0HQhiW`+yfN%8FXIDl$Dx3RyH#M&|n?~JfubM_nC->4Y-+0>P$dq`Jw|LAE1#0rl z5ZoA#XNygrET6};WQ%S`H}msgy8qk8`gxQ621|$PLkj@Q^*@irTFspx1jLBDr!$&7 zO7fcVVrwxgb1P*W@9Z5+R#JB^`{xy<+OL`(s%UmE`3gm&i@{Rs=_anfZG2QYJdOT@ zX$QE$6rwU8yw8x2fOY3*LQPPGM%hjLy_+Z_Tv+etdf8tvX{v(Hwj0UN-EYM5%OydT zzo!Z%}pX zyGYuKH(ddE92mkB)VVEy3Fgi3$#30k)2IhYu=YYP=#AmWM*#N(>j6qPNj=AoI*syq zi|lpknuoks`TmFOgMlN@FaAG*B-u3};B>5YSF!QDinF9uG;8|GY-(1a`cCqWTv_(;Y+Qwn)8wg!j1>nC zYUJ=XVf_Xxc&(}Rz20}g-N)}o={%bm@h|*?M$nf-&5a5`kVQObM!*AlFpXxsd7hx1 z{gJQ80=G$8Vzw;OVTfhiGb#UwN_8ry@>NrYc}$_Dc8`eBrt&5CN?jUe9wQc=cE%(p zFMxG6$Eh`Bk#zk1|GND?w{f7cLnA+mI@fn~F(wa=oWo7pIV|Z@#ZUu%3(O&O?VA?O zevXiqm=#~D@W%OE&<|_5cv6!f?{f``G+xpCAf3t6l;WBGZ`8iaLeqm$@fjg1fN>{A zsPQJI3s&kF-WojA#tOvu!kk^yujpu$Oy)md9gk-Xzi4y@ESitKX59nXI?-178J;ig zveAyKof-_x-|Ulz0q)C=T4RI_CsrQ~DXQ|oIC1$>+D)pe?%A~9mr|zjQaR?O{UID^ zi=|0#u8%ZdgOCdnwf3_~I~t(;oV>^cx=GO z6?^0|)T<*Td`RSU`5M1^4=Xmci*e5zutpx}jmJHB7Oq68owqYw`k3T9vs|V|Tr~U8 zE=f^)SK{UCBX++@yDB62jZqr{FH3dUutPeKCgk8|ZISwwJ`=^x& zC2uaOzPQUgUO<-xq>2;3Gf=iUT0EX_K9+6#A_ShI?#%YG(o&73SBYG{0dG2GxF8o@ zH_77iIh+?7_}ta+=e~ts&cmMqZl@Zj(~*4mu5zP|U>d94VG?ZDOp2wUkMqKbdM*aGMLWtYeX2d&My z4UTs%y!oVR1D&r?DvJxtJO=o2FY-#%PYetispJ=T2LVA- zd&o3`gSuKtr9IZ!^_iQLl_%#;(o(>g#!EML5{}I!XNUQi->O_l1}ajNvnM_*dN6O{ zThfZ!@@e=yyDPBmG9;scxgT{;iHYZE^kf{3v(GM^ik!Iyphg2 zEj42jGb|IP*39t(#E|-7UfV<^g+vgNAr8wHv8Cx*;}uQ-vWI8sGZ|QXHyo$eo%bL3 z7Q$(4>wX|d%vq{$7fYy`)2(?nb7sSCfu%W|JYAVNDp0fE1V|Ln0Hg_&d2{%tT~|jx z99bcFLf)j#NGXU1v#PrE3Y1N<%zmdEy#Xd#MH|JvQ$!`7Na_NxcgApWOxy%W=CS*pgjbR2$yyiNTM2L| zfq6tW;}`+N<_&vS2bl{#qE0t^cXb@|>jT*USF-b$WvnCIUZ+~Vt>prFfJcV$(bco@ zK$++in%q?R{4o@J#aqPSZf$mhG1=Ag@vQWXr9ng%fTp2bj3A#&x1@XWz2XmeI!v%( ziJMO$LwsIndTEug+;<^wb`QkMl>z}O5Jb(52vKXTssmm*or}=V+6PRrQ)3@icDT zN(5G2M{wVF*l|&}r!VbhamDX}`kp@Hsh;lrA{c$UB&1|Vvs>tJ!5!)aN{SE9OUIH? 
zF8CTAtwn5w;kfR-e2^W_E^*o40zd6|TaX+cX#x0hbug#`jQj13Yt&sW!o)gI^mkOq z#{^dDYf{IZ3+bFE2w#!8I)m*&BdsEZC+uRbYQoG- zdWK7>>8FGhIHIy-)|K-g`=Ac@rym zM1kHE!EM7@?r8-So0UtqxEI`jDRuyB4BN&w7z#lw4c&;zzabpwbkeLe&XA_^S))cORHMIF z11x6qY35CaK2rGd)icoUQiqRb*B#u0E-82%%E?ZBEfy)ULbpSMBC}-!0Wt4-M|pPM zqIrwshoMKmJ4;AcFM)dheb5mu9!T1WQ!;nN5$dU8#TNZJv=xO^ylwnM0Ih69sC$ly zpyX6|WH35L?EGf4?T{>O3&8=qhgFLRCe2iI)Ps-#xKpJCBnF$u=!qCV=hVl<}{ zgqiMIPGw?)i*;LWG+Yl$+X&+&#GjbnV`}$M^1jq0kPm9mSZSq_Wg7sozaTVpO1D ziI6jF0kdY(qPJMC>6Ty%vjCB92wW!T*@;VUIx1l!GLZfVq1wp!iJBtzW&jQoO#ra` zTLr)nO5JV8Yzxoof(_oejG9}40Yc%Hh(rD}iaFn6IB{B<_6aPZVoBE=;Fa1aad!nE4VD;8A53`123U^5Xv@2~<8CB|acb#-A`WSWv z)F9Z$pON|+-VB9At!^<=NmuMrpR|psXpxapH?ZE^>SXW|FqJB7UeRK>D7|>hCj4$%9#i@2`lsGU`M9)a1iMEZq zPdIAbm}(eOYLszESu^#3i2I(%>&d)Gd$DG@@bdq2t;N5<8=PdoS1C{UwqLHZSZ zDkQF}GVt=E9K2;u*u+}TInosLI1z4~ZAb_QAIcR3cJ9lPdq*ym!k9fTP1Z7t19k?k z<7*BTcH8D^!BGxS__h1@b4xWPU_l(y&0ObPf^YedQ-_Xu89CkhTnrIRrwwCV7uB^{ zsIFUL!1#BJai2N1$ggmlN-z`IO7UEIYsu2FZW^{US^PosN=T_B#63~=gH4wa+Y@n~ ztD#7iuc4i3Vmz+SQ&9l#DO=ehDzepf1(IP!EPD>+Au^sYhKbG#26y2x72hFW=qqtqu0~=!yt1qXb>Cu9N-*IVae`MOQm|fB`US`J20VZ^F;J_(H@R+^U z!`3!CDj}Sdu@an7O2t#h+BTFSmJU84m>73XI$?Wu_c_UZ=2fV5$mkkw3i4bmy<>#| z@<|a?SXZ9Iw=nag6@Rb}G3((Qlh9R$d|^{BmJ{d0WWdsj0J?wkTOB^vW;&}A(max6 z4DA6*Hl_+(05|VzM^Gx#8;(=OIrBo_lv@UO-WL@E#^pHt*1prE>E5|p=VuZ%jR=dT zWB4wkv|l8<2Ux{x*ew2~&&D8!B$kq;nR5*)W7=CA^jf%x319BLXPy&|!=0_he4_>F z=I@xE^aj3V5O|C-lg*DN)F z?N&>9Glw5_$CT11_p+LoH6D+1!W8bgQWsy_7S1-~vNUeUV5PPgjOBWRCyc!@?yky* zYtm09#Z&-}8W4w+=w;r^kMOCVQhgDRM^0<5En35PU40~2ijNP*2lZnF8I9(*AI11?KLi?hP7zW6zl!?1Y^ z-@oj~bRrE_c@w-dbgJKhn|LppKhGv+q(aEGgA(aaqSoSm@Gcm7+yV24zfeE;8xdo+ zWwSV{_tdKg-3Y~o)QTHS%t$?wD#%n1*l4Ci>y5GRKNf|x!rk>I&Pm64T1vN|ez(zuHfvOkkcG2c}(Pyc`B-ZwH7VEJI{v2Koq6D}$esgLw_dk^!et zY+oqF59FeuAboP_U6gNbiM%Ng;f5^@>zaGUKq2K;%gZeY&J;lhPbs^l+q2<1={ojN zG;ee@LN{<)XpJ1EM4x8H?H0aCu*gB_1SlZ z_q)8D*w1_Z4^{cjie^d7BcUA9_qw;>enE|#?STd z%4Pi&Vjq6%MP3vjFTJ@bJo0Kx9C47YMn7fm!Qd?U!d%S-J45OvKp_6Y*Uw+Xp2D@3 zfOPj;H<~&;miS@b_?U>LWB=n)=Y-P2 zl${Y4MDVLn;w?D68gFFi*h>;I{xUK&V`JPw8XoeOq^FJH3EB!xve(lV&ucVvmF(Iy zg*a4%&vf`OR9r5B2g)vq9`;GJ=okWW%yRPCgT)i+3c>XJ4to|T=s9?9@P>O&+LMMl zJ9pFZsoiP;iQQqtq#f15(<;Kix8IY*%D=6vX-ZG(%>*?jg#eejH0&qQe8R2`@Kf(`rt|r%KpIT-4DLvb+C;-mGM+st_(|5S*B2^dv3TqEGo?Yf%Eoa z`8s3o^Q1$)oE_nZvFX*plZESeo9fHTmKb@|>j-uik3d0Yt{W?0iqr(TEpKHmTv=_} z-F&JK6R)Kx z9MJ(qAs<(=@=8){(c(Eu6ELC0bhC`-uL0}BAzqREjpx%Wl^=`S_m%KQQ*g0_vEP=9 z?Znzbo_S@QvgUEHUgTp=7YH@jbD4<>Pz$l|dg2KCJB3E#vaV;gp**^lk)UKXVI zq619NuVRJ5ZWo(4f~uzNoVm&{+CTlAH^$@pJP&K5?xV_)aT49%G#NTQzk^Ke?f^VxufJ-nE{I!OAMs#FAWvKm#NYp(xis)TOd=43!~HTbx!l(EmP4kRecub#I=b(| z%PI1W-BqN}Gb5Yo3@xwPRa7}en%oIxaOd{XX5I8ier7@F1)q^K?)f__)x|JAbJ}D% zr*3SvridP0dV|)3oh8g#=7xAoKPFST1fu?4xkTf}FnO)PyA-pHjf9yGhp&|1C>-Wr zwCb>&97nWoryL$yXY13r?Gu=gtTzK5P}Es2Y9pK?wfpKzc=C#s&@bjjN7QMTdrhAp zmlzZanOkkEWAzNnR^NzmCEndpXyXNzyn3<{GGRz67umkbN4#>2(|}(j>kFaQn6Z zT-KMJXq+e9ak^KN3`rXJ(lW1m?5Eu+jDi=e(F+kb07mv)%~_U)klQX96{8C*R!0fn zqse;ISvL`)p-2c(|0^q8J0}|(Mp9cgmS6NniwX=@BkuO5itI8SP~kM^zQy0_Dr+Z| za*kR~U%`rY%S!%EzKFlz+xC?V$|?NH!wgJS)$$jAT$SX#Ez-cd_kkb(pnJMN4(MQqQ|Tto#zvyD%a+998N+v;Z>V0usOZ84^E_O5j1$ z4Sls5TP#hT7g7`-)sab2szJoBUMyEt_bQ4%liNbT^W{@hPBJsR6unT4wJbFvd70eMg6u?1dD;ymKaVph9ttx4mXs|`%z?yyz zz~iAfybR>M)o4;xtc~qLk^Ca?Uvo zk_ACQB^n96)l0|FHMj`>N;ed%W-Syx;B@o@T9{Ufor{s;;iCu09An zvhlI@JLXN?j)%=tu5oLgELcpZq}ms@F|<=%w?sl@G1=*n5;Qvu&@DBV^*n|YJ&&sjR52H6)`#zv z-DOCC5cv&7z;;|pWs0MFMU)Fu<9!QBNAklMi|=f((&x{%FXk+ZNQRsf_8IKWE^NfU z&nOnlCMot7vFZb_Y3M0iNb0Dhopl0LLX}%mvK~1{E#Y2h7}0-;b^jk(|-=XwRc&MJx^aJO=_?zx6I(eHw%y?;y`D{hi=nro2Vci+v8QNa_I(* z5qbMj-+K%cQZ;I*A)24Y$JldSo 
zFv}%X3dc|?c&xw@XEDsg&xLYt=jaz_=6&!o%ZWs*@*_dRaP zaJJXMd~LP@2TbxeF%aAOl7w)zG1W%igcuj6n=7LW_^P2-m-qM#jc$QHoj#Ti8Z_!< zb1q|`%!<+AXBV@Qg{wn|G4B`rrjsd%5v>TG7}wmB^kWp97S;pk7;xKb+Pp}da_3j}({rRZ35+ws zEL=8bQtLBwR&Vb1(G16kH(M-9j&RsV&O}PkcSr~-`Hgtcx{b11WLA~UM2f-F8<1Y} zRDIUF_c1GmD3@H9DT1p-H{q@_hhK1Ru!t@E#4}dLB426ojuQl-I&6;5gtb(^ZsoZH z)WSm;ez)kcu)=CTGgzIKDk58Q+=6mN>5V(&VUpf9hQ=XgLIpBq&ZpIZkPOE>Dd!1? zeAae+208xB2~W^hYDef1MWD$H&lBM5C3?U!>l5%=`DaEut&V}nJ#TB$Ue)HtS2n|2 ztGnVlqi>IWamZ+|$khyxhUoV)IaePRfE86(qn5ZxRjKB<%hv5A zip={sl)n7z29bh$XhB%*{^@bI?pH{{$JKD!lH^wu7-5RmU&E95J>M=xClP?fAcZY5 zJ+*jU&l8fI_DglnTjigd8ZbToYeFL9sfmiG(-O{0ajU5YAaL?*&rt%~gMI#y;ke;>G zWRJR^a=UbB-;HV|trc5YT9>Q#68Ufp-E+ofn(8lh9e78yqu(3jsP42NY#?Fk5fn4q z;6!ax_}5JQ=luz4n}q9{Um35`37ye& zE1E$nhm2pld0+MzpuBW`56fxC_M|v<_#<_c9`81MfiAjBc$P=;7}GPxw^z8CoV04^ zn-bk1iwBs*$ZDwn8%L0hV(}d*Z(}?miUTxp!VTefGfpRc4RdCc?$t-t;E z$L@~TowEGf7LuzL&S{B-kN?<0(N4iaU!(JT$tqnQdh!ybiG4TqTN}HLv2aLtOg z(My`$=YeGU7$NL-5?NW2O3?A{N=O}rF*6vQ^3@9(3uY9*aSh27vKC+mLveZ1Wdd)S z4E+*&i@x=AGH@NL9JUhKx}^3hEnc7`BWh+)7(c%+nvj&Xm>u&0Ti|tAe2^F^dmi-d z=Pb!GP?-#0nLkZw-%d!_plK#VZ$*78o6`uMi=(rW z)B2=VcJ4LlF$x35aR-xaVS50%={`^R4It;>Oh|*Kl;n`XLxmC}w2V8USC_fEd0UEb zEzNuZ9=CBiuxuwJLD+_Tcmvt@Tx=oNF+^X!yUr60fmc)1QR~(s!EmY z!U~jK%_F&0nv zrWIvDUyqH;3K=N@Mv0h0+VJO>jw~uly$B`opAm4=MuL2;^((`ZQhA~zYaySr01phL=2!;calyZNNt&uU0%e`KJ>ZxEn`RVWOp{I zM7l)tSUrWXgsm;fS^z$Wch2vo#}1~C9thvibz^uM1&pZop492!H42wMv83XsOS1*y zvMAA9y1fkYcrE5vk;@c#0p`8Q4XNyKdRcB8P)c|SmTy9a-HT%Mk6bi5Hs3p0rAqKMCfx1jG!8H&rb>&EFXKJ z=u&l!pal-SMR(Tb#!_=!g9!?05NYAV0)_LEavHs;68lJB8&X9>ssOtNZZPJ>J^j*N?*I&f0WV{XhW*^P4uo8Hx#J6#1r-LyMCdXUQ zHr3JS!}g)~WD7#7!cND^62nHnYBR;qGn?_Kb*Np?3&N{cAU%kGLY%oYmEHxRA!qv4|ys-9!b)*cI z{7IaJ?9tuY4{>@1_g)Rt!0uBajLa@15ev>N{Cp%O9t(aVvG*Y{``^iL;ci9F4@8Bz zYP`iP;sy($s_|BSrd}tCh#YbfmeSuBgV!W1BabHMh^)s#@kmWEbYcqXe26Mm!CL`X zQu`ZxC=L?s%SFB|;(VHhC|#~p_lQr;1ihXXbHA*!$qjLBbGi^~VbJ)*cidNNEc>qG zc!N0ceQi=mM(~b-1VjLAcpSu41q5?aHZn7EhXaK#GtB_gdJl^0UUW(ILJ&YF>ac5J zYD$DxeZ6GM#1L#dOLarPc^La~>aOwoBZc1g)@@7}mvCRF?m|pd5I+hE5RJ7)5`r3r zxiI-QigTkf(Wwn{Kif$q+@|e5?rwti8groJ!ogSv0zbw%g}f&2#aZA3@6e*MA)_Le zrdXt+*k-LZONT|vHK!ZuBw`CC7oFTlkvd(5DFH^Q5C z^e14K6a((xNF(nmH_*^VYSY+(yIGePo&}Ecg5(|O(1tK0i?#*uPI)>vR#z*SM-E|D&0T0BK68hoiHILA!XQj#MGfC6Bp;aLq zv|Gm$FGRwE!n;^Dtu?6IKF>^7fz=()VLGuY5TB8Zx`W^&&OH5)qqlW82d6w_2pM8} z+(Z+jHBDoFcbGIg>UPY=n@>Iz9dIQn+ff#LDi?1)!mM@v-W`dtmR}i>L;5HY7SM86 zx-~rXv}-;TED67UtCGRe8B;!XCv=yhdOro3Yx`*6`GMD>;z!O-wh2s*rrIMbaGam7 z>>?)?jSW4PV65Y_aOYL{}5e)1<@u)rW<>&+*HLAYo0zx-j= z3&@-t-ygKY0JsI%QV(6nEuYH9(ZaM1aLP-&Y@Ir-gX;r!m5g)(H}v*EY8=2-nXu_6 zm-ZK>&G?~zCJNtrOoql}| zgMkQz!esFrh9%>6oM% z{}zYf7$b8@hGTtw7oNL69$fOIMr`h(4xJ~fsqAcg0>_v>Rvq8l1YxF0G=7^WgXtJNv1_l_6s zoF&uyutT@|Oal2l=4*Y1FwmnX5~LV4;Mswdv%g%S_fTuJ4x|tbcc~vhC9-_U#2AIe zn>X_13xtVO182U*l)u(sq&w$iR$K(9FRMS>KbGgO9`P%bln9)_8M1<=yv#$Ay=J*= z72_WiZk7_lyqNT8#q{cc!s8IKsyL5P1v5YDF|>$-5Z%5}#8Xfz9bd3k?^BQhxb0Vo zxTY&8D&m6IkO1rIEL+DR29~&u6Dv<48O2&u=j)Gc<%(zeNG+z=#zyAy0)^`pZiQ{E zf`u6@3|GV$7bOi@9c>8>>CPS?5PL~T8d}o~QlWH7`lad*sBpn&VjNroalg~`8tgW; z^9KleG-s)#-1ur*Twy#`J1^$yu#|*TITgh1Qwyb)X?W^@gd+IUnVdn{JjA6b(S%(_ zA)#2#-x@8qcnPgh(LZwJ(H`D&s186bC%-@9kuhVdj>V*G-2tXV4elbx`%jHrw`wysT1&l`n6(w@h0M&AuF^#}^!`{v-c$XFv zSvW`O6HBsfZyPVC?;kT3Vd=Qu?#VmR@*=YqAKN|3qH1>U?~IS&Ny)UkF${u3BK<08 zFFXsmFW4fmA<6z~0Eq)4UOl!Ur~Co+y+f0gWeg53zMcETY7Qec!TC6WlBJzE717vG z<^aZKMac!~aX?_b!iS*ZncDX)R+fR6;hbQ zPrhG{8%XQDP(Q6SDR25&^x_#2oCsBDQm~_p8f=3?sawCBZsZr^;^;^DdN zX0a@9>$RmmhKvx@o@IH7=zn}Rm#zeQi(3H{k3@=UzGE8O^r;yeW0D*Kr~0Uap@S<|EndnMw(NgR9~*Pqlc&xJssOD6;Bja(Q;yzu*3Z4xx0&zBbstgx=fUlP*OE;3-0ihQNTRVOlAeHQn&4i#pp#OVM#5%~@w 
zHKyx{BdDQXt`s76@8hXaKy-jOy`S!a8Tj$sPYNpoX{BuoV=Fh$DZ7H=DGsrX1hZqI z1`jKXL^kDD`NPT!3|EGZDYXgx^0BgJdLE+^PJ1Pi)@KPwbvH{$H^B!?#K< zARv{D8vD1ZgBQFp3gctJZ^#!Jb{85FdLURsOJpZg7Zi?OhiCHQI*!Hjq3f?P#=(uO zxU1_dr}r$c`$ritX0r@nk4ApeBmh zd4hZ%H%hN^$WJKYiegSEk>eQ50GW03^tW4cOMHs{LontYL@#9Dr{=HEn9clz&zu^C z8S-GU6pKU{huw)aq{blj>{Vo83?K1n0}fGwPtQ~evE(VM-{Dwy&re*x23eD0IF|{< z&V-mVB4B6VGn&m13}k!EV3Wl)QWq548|dbNKh1FJMC&F^|45js43d;AJ)1NiTGSFj z6A>yVP=K+Sp)pn!cdnrk{%tW{jI=BpP&TNLz;%wW7dS{6Nv249z(qVoE%+WAs%-<` zD}y^@h(uq_3K}4Jp0qgRRRe5v-wZ{|u!)qPhtx$%)<)t=)lQl+>Spt1YWi2^Uq*s`q_wVeIrqyt;!n{bZuDzDkof8$Zb)*sbwnZBwxHBXi)53(c z$k#WaeK^I{HsA02U1I-~;cOgzLSjhar}+wU2FXaU|AT<__U6Wt&p8DoCuZCo)ko;~ zZN1jQFStc*hBG+R{lc(4UP-G;i42do1Ya%SkwV~3nzgTItizWK_(yrHr|Xo1v=W>Q zw!x`9)nZAljuXXk5o}T4zv!#TP za$oFP*6BrQ^sqvZum%Q%v$gL=e)rv68ts>eAy$`}y`tiL6b6J^_v7w%)MZ7q2$SmD zJB=pGLKoL z;&jbwN&uPriu~zY$&?gBJdl+Ap(9=Klo+-am4BA7Sn0~TRkm+i)VNfTRj?1E5qNkB zx~hQRqS3iSb;<sZty>TV_y^jzOl z{w_+N?MFS8u!%|<7=V%~D8R@pyLOi~!FUS9(i!=B`xW6a3|Snnt^QLXy=DiK`6gE+ zv|2gsx|mayl`55Fewd?Qk|FSQkNvW+DS?9uia*IV?ZN?D2j z2afg5O5zne-pV2afYgYw^a|~y^38`80ml@JAPVLP2RkRH!ghtoU;OYhw|;mI*~7nL zAV{ny(EGuc5gPF_mVtS!oCN*GJ0-^&@ilNGEzFLh?db<@Gt{9FV-QB9O1_r7MBB?D zL+KX=YqEi%hefF>E73<#fjCX+7!T_JoH!HKVsQ3B_=vPOKS8x7P`SIT*Dz&j%`|m6f0Z|F`TCd+ z1OUL$S8#3{-fM3^a0R23J_)i$sR zTSo=*nh)nei}s|2h&iPfAZZSSnSh@y{0?fmY7rSM(X6mdm_V}x68Bz&rQnEH6Q>WZ z;jmaPB6pswOB&JpPfHi0cX!A}gqdjen1YNQV>NO^X+B1Z)0tU*kte#KUkIDxHU%;a|RE0 zLK6y*4}}Xk=64oOJGWh*E2&C5g`9_gK{Ob0QgnVdb&Avb%_}hbwFU1FjVM{XK#Ru4IbED0GO(Mp)P_ZPA2&DHHD(EAU(m{^RZc@6O6tqLiQv$R1 zs8j?v0>Ur*vqpjgKWLq*oUZd)cREv~h+-5ZfJ`DNlwfJ~j_;ZSEJz=94U0Ra75OKB zLvzQZd!#c_NoZq^@FP#2ibK`eZ1}dWtN3M_u{wA91Ol zFW6FjKPGGIy|Qn=Xq~X}lHmg?Khj~9X$qv`*0jeDzGt_-&nT=cvDuXtgDI6casgg@Z(Q6iE>URe|`P+7})4&A}qtn@ErW6YOkG|VhE)p?3`#*c_B3s=*N4&s{w z;9JIJ2EZ`<`}nZOxaxt5@T^^$YtA+gVuW_)NAwawX&DzI36W2G=9>0m)~IQVAM~#& zsmbIS_vO30?0Gv6uOhA*$LkC%kf8AYf@@FYtWS}_S>`nv05sFZcevv4enod2(-e7s zOMxo1q0UNsoBH{7_8xP{Kj9WwDF($?#A$T0x$H|oDjvb~c)1%(AxV-Zjt9jO#%TU0 zX&-pp?B@^F^v;1ZPR9B%!tk!kgWnlORYay2H)B}Y9F34^?C_lFPG1pQI}E_L>Wal| z`X?=E{RbpSIgtYG36?rsAw9)QqCw@bwEXLLsZO7zNb@n3n>xjd^Bnd$X);N5xdA`j z2jFRs1z*<92oDhdU|%;lQO(1T2&HDTpwn~FyE(EaZ3X(OMGC7*P%Lex>jTA;a8LW^ zxT`xbKoh|z_)-a!bQZF9`EA(h6k`{gKOg#e;-$#&7}#NI$9tS_!Gk}-~9 zjFYYvd@<*$Ma**Z^CKk~oc1@}YJkU}?fYM1p%e_e-;Tp+@m<{s(Jmo+6@!*nG_r$b5r^Aqj3mBLZ(ah;Z)9pDj{ z1>ZU1QE`RjSJwL2Rhh~`=fqBr*snu!5NCPCkxz*;@;6+&TCiu$DFI|f8Dk7pjMM! 
zMGg72%)dW)#Z0cs_I?V)qhBS_v*NZmF1utOhr_m^Ui?mDbkPOX7rhsEswEaQTFYTO zZ3xMwv>6k(&*Dn=zBHQ519C-5130QHxZnNY#QY=i*3zrwK8MIjmuG;siVmORR)f#_ z+}mrwTDwC>8;%?XO#%O}1)ruEYCN0M8NSYnRm;@A)=Y{!9<6_EX6WHdVYD~b=5tc| zcCz%aH-svedQ2!ljk5|GZ_DDJwFi@>qm-XiF6vTdR#apSs=BWVOc^l}QIe=J zQXuPHsqc+u7o4gAYlYw)okeGk1QB~XTBU=w#~mz>5?$Ptn0oTmML+hLFPx6KPmX=3 z%*(0-R){zDu^idCB*lLhF#h>o!Y0Id@CW%HQ>V_SP?6|VYj2?EwRu+OX4$V$1 zg7g^ukHzlFs=w80Cf-rg>x4WE*JEh=WUJ=k$bVLSjyaAX^^@NUo-DutffQN=S*R_s zxo)GU$_BZ4Y+vP2$BPev@YEPZnjLbTKGxD^w>{?dkCdwRY!2ws&SbMW3l-&Vl_PBL zJP32~&}1@M^=*ook&z3NNG^iiupR7v0Y>EHArFhiwO77KDa%>)=(YuN!>=FY{lK|2 zAscDIo0(O2^{)0&(9d)Gmxt8;9<10apL)C#IVk@$DAA=G4htS@VPD2FHeIj{o?pfY z5fx`*AM@@#_=HjOyiX6yn0S$})GjNu+iv`6oGRjuscS?TYN^+Kcz$J81coR0S3+s4 z`cVIvIBdv;9N|mqzz-;0RL?yz>BFvTNy`qr-uw6<#fC|Y06Lfa!Li7wj7!co^Q^VN z1_Y%)x8NEmK=XB%6X8Ae2PqMq9Iy1L3088XJD2DDqb_z^hg>8l&%nmQuB;F+8p4ub z!*WoNt9!vnX~z0PztPYDH-$47O?gqPl8`64u1y0W3tK7MsTE#lQvBJjC=nUJ*RdVb z2E@y)w7kY=V($y7DCf|ZmyE?}G#oy|s(U?(GZ*)8Y8cUWgd|pJ4k9pL#Buti=a7dE zwuB__Q`Vb&+vgjR92$fMpd3g6wkpmrF=NC!}S${Ha&@g^sZECn$B)nob7 z4?y0gG}x;MW?RWY!htDG@@gK2A((Z7OS#m0;o1$UeBKjrq@#w+Mk;m%>Y)4djYHTE zFYDP>&G##2C2Z`>|4gsq!#L zn-6~qUVx*H64z6b%H)zFUEe@5w^E1wC)UHYgEc-ny5JkAZleCwM?LHgNvNBzfSE!p z`(!G;J%I-1MGSJkR~wr9bC<4mGqP=e;|pI6(z2*$7fYDdEz;G8(WKK{KnUcr-d z;(Fa<{QvfA5eESdCn}21&ug&Y=_x`xg^801a+tk=T^9}y~ zG4C3~lGHN#!x5Z5{4yv1v~~CoZHq;`OWgc0Zt04wP(;1?HHLNsn@%A<^^N9z*#iG{ zcm~M_YLd<;X@c8VA#DtOWK|YlUYA->*4i}^R}N6runq|xHfOS|AE~~tyG}d7oRx+HYn4-?#FiQJdJ_D!x?VLZ^AIbZ zJ^V6?U%C39pWLDb6x@H}*Z=ffK>s$W?JDyQh`;!CYOHR=^T|!ngbnNs zim<_wd-BWQW6h6WQ2&59&Vk}k_fP?c`f$R5tMoYL(BIK*&E@Uqza1j~8wM~vglh_{ zzSG>x;{YxLF6{FQDNTa?z`my&u#s#GDMq<09>#=P$~k3&x>p939adCo#*aeG`GNeU z+eLrxru_CW;2DX3n|&EaywmZaEHED)@p*hrVg4Ie2qo49-wv7??l;4!zAMqIZF&o;69@x5aKr>L;@ zH;^8GQg~kQ6o=CM(UF<``5{39g!SnmUf)Gl{t$UsoRp5uY-zE=Z9a~Y(pedg^YEw| z3|8|baQ=WY`{rTei}!VBB~hx*QVwqV$afdKLe%`vNp{MN>CI3*9xv-MVchpwrZLt zzU_+9Ncm-U7$7j#_h`T%mSXh0aVZg9R^_|8Oaj}+LCvgL^kL=EQmRzA6KiqPc)hn@ zr`OuRxE?$LvQ0HYqsP+FPC_O6xR6R%0kLmBpNYJ|*CHG9bJ*%X67xsmd&7@1CgQlU#ON-l0@T`#R^Xwgv5Rl~uLv%=T1N z!H~>FSB50TR!HhxCUr=Il0G3N6!+!Z;*XN+vhlqKA^dF03u+?rh{#N)C5Ubyq_tjy*S|lFc7KSm zd(yr;xj-V{U+vS{#hYc;*N~0-F+}ueI8LE>=Ac1m~@~M&BgOJ#(Pm zt0dgN$eQZ2>HQE@L+M^9>f?h$V*L>F)baxD(~9H8)!<08$Nm?}mv=S9;kVCXy>%R7 zE>@hmen+p=k2zopu*@45gc?S%)ZeHpA$m&5zbJ=)PAB7jolG56hlty!Vu$HN@7sY=k}^ovn0Rtw8Q_FC9t7Z=s#36H8UD zPrD7s4V-0N7qD~Y-`GQ?3f*)?MQMaSJnr$@xScTso_eYTUKUb(VH9~Y*wd=6yqe4wm13z{cFHoaKWXOG>0>&*Ep+I|B?bv}q{U(82Yd~U_*GUD;-cV1x;+W>! 
zJ$MmFJJ}9jYusiyd`!E@Onk+ynO0+ypd0fH*$O;jpSTv>yD)j70*=JHXo^*6 zyVyCG`zFOeu4BUGlNs?uI8V=debte*h9_@QbEjD4C3op?thN zgy%pR?sCNa7Z-oT@_#H3iq1nZBI(bV*4U-rO*&1=FcgTJW#%YEiwyI!-ldFK#3tv1 z0)pUq`}VLRC*;kHT+T%y_@iSb;*Z=mBoZvuvSgj=Ufm}1hNPv>JpXwq`+HMtQGI{U$&5Ji0}IQx;(1L-9Kr^zS-1{4S0N#A_t*JkGA!;BqE~1n4_~O$&=-ay zNzjOPg;)a&E_Sa?#Hae|XSR1jEvw;wkF_TdP*#kQJzrH;ZQGWVAT<(U+RpX5ot7DF zjdovp?6!xBrm98e4y<47I7*ufi{S5&>tDkFcsep&c2 z>hNsB#ra}b>mupyER#ohrHkVGJMbPU6$>!5u=nhBHpU(7Wfq|4oSDROdx{`;??>@n zl)rERDisJ^YW9xT@9o7G+qJnOwO;G8BB`#>={rT%Qy*;0@gg8@YHIIzP<- zkr%Z>_f3EI`to}yQiOq?;hcx_9AimZPCaLQA}qljS6MSwrhae8$9^tfj$kejTDAJ1 zjyLvtF@S!fTuWW79^Z_RFt3gql8BVxwvO7f;a`p`zpKq>q3M_=?dT%0do{6oSVa6e zLoF%#`Wg2Y+0ca5wWQs7hhDnM3#AL)|>qx@s#QpNZ{_hJZNwuEE&ycsD}-=0n-)zL`MEUQ{#^T6uGE34IGfqJt~F z`6sPW`sNdr;(6$)jOc?~gs}z-FnD!7Dw3DJ%uM}*#ea12A>!rSR|)}jbG(V5s$y;E zt~p~mzpH_r^O{{7LTlMBO=fi>Z8|p@*4U2FN=Y4k6uStd03b9*LB)|?wyI7sH9>r{ zWbB;cXyT#mZii@Upr+=&PoK9@{REfJQ=^<;f40Ea=&C>>LWzyEfXc&AySTxw#7LQ&BjVSCH?_s# zy>p}3o(7`sRUt^_Ehij9T6L8aZbv)oTamiX+A=iTyToQm_?K~bxKP@SIgt#GdWDK{ znPdnYMaBS6*w!n%ONHc|?DTyhsBUBk1)V)QNg+Gp;_iicrE(eeBvK?>1Jw(ZP}4x- z1S5Wdf=frCd$}gSs9>bP<~{{EP&AuJzXQ!h2e4v=e2l}h3L{H@EqZ0w1^4rCL{Jui zs1P#tq7yE(9}1iDzJ6xhYcz?ipz=m#^AS|{(zJ2V-Nm7IPZ191JDw{zS(kEh&3!=+ z&>44)ovGl@{@_sEy9OxN+tw2pAQOHI{lek7(dtlN{7g7QU_-WD5M5eQLCloi1!$vy ztbH@bF#ySRWLgv294`?_l`C{1RL6{K1k z-QiNt9BcV0kGI&|{N^k|v`$v#Gj*X=3wv_?c_lox9ITpMrGElHF3@$%%nC8Mb`6EB zNXl5=xhvhLGcqY5Z-T>VQPgKI0BZhR>yxAr@n@K&TN9|voCpIxs0LcR_PW>lH*J(C zL?%7%+tV@%N@rMXZU`vY?Sn6S_T|3?NaZ1P;fkpij1+q;9PI60hTM2%jm3<%)i7rQ zzH?y~H^1PPb>qP)CMmo`XaO>x7~afP3K0aYc%ZlGdUuwOkqQ~`$WQRBJxIJ-)8*^Z z$C}a(G^jkjp1&JydwkB_6sl1+~nECU7d{jZG!C5j!HY);71S+c_QZoo}Yaet;0`S`n;SKGsI>{QTcj`fm#URiGmyDKs~j4v_c<*8ElP zUu^NL!a)Zsl?Syp|MKFWhE~XofY7DXI+OKVd__jjQiF5q{|4~S-2Oif{1epv(k}l8 zp?_4ge+n%eLe#_C-2`uS+Z@vmKOG$St1grsRURr>0VD86oJ~`$rsXA`?9ydUtg6S^`jwmFFG&551l5i}xm`m%;FJSo3JIaa zHMnk`pa%FP8aSLB(&yf6n6|MF=@lUv9-l9e(YPJ2>N;;3oE`4a#SVG6IM_KKls1_t z>_a!!!*7k9|ERXX5eQN_wxg7_DF{s=h}&dybQz4ZT(P~}bu?F#IEz@yhn`R4|6?og zFEwD^c3Zcf^vLH(0u$lDa-J(jxD-bTxq{+!ScyUtCd}ZxEAk+@(%O-L5P# znAs0VHTpe_1m@k&;#)J;sWbJ}>MBD(HP>*0ods3&-vB^EEDkzqi6QaTAi%YRb5v%Q z%EUzA=+%FwABjLR8lv~Tk#9oa9{IQDqeVgeL^2Ud2h80GzeBYM{%BuZNUH2k|WFAJOKxiRJ$u z@7~?=E~m}tNZ$(Rw&fX9z6Q<)u2Hr{qzehH97b@1HonxZbZ(7ONtkHsryCln|DISx zs=-yKElL6O+G<==a`&opoi?s?<6j2&Pt5ptbx{}&H@70?LJ^4~ItFhHRB&Dy@`kmoNpzqVw4V_A{8LD%#Ju1Jfa_C`h;L58S+T%X_Orb^DezQ6L*Eoj$NH%b2UsP2@n#^%MO( zTd@jp43~K2=}7;bUH{#a2HA(m0B%~ofZqZkT>9R^Sx&GUql)o!Uw`VwkFNqHlC2f; z*fhf%dV(T`ZWUNo*@4L+-W(%*^%<;JxmFTfnFacZ9>1XJZ`p3>ymge$aJCd7XjWaC!|e-Ltp+wmr4bH_ zs+rY{>q4`*^P?&l&c1cdxV_8}`K2$PQNrbm)CuZ24R>2?&!PhiNdEZQ%Q%5!gF;qk zSUp1osB$7><@rwoW37P8#Iwu0YCk{zn@ayyz@>$2=P$aOzcTTc!T!aq~3Qv`D5VG((~c|b{oq^>@tpKDqXhjY;G!U4nH#1(-}VVDecO%U;bCG{^+phFnN1d z<{uv#g$gutt?irPH%$H{dD;|k{g>xm#q`tDU;6gf=p!|>vxcMY`+po1E8;I*8A}gr z%or{~+;9S|$Po~3OTAyYM-$PNU239nSpQ-rF3_!P;fxe5>c0HVdYuC8lk{U(M^n1o zAg#6VQja=!v!cWXXzA&2Ud(@@TP~)j5Lxx0v6KCvIZEX#Q^v!zn&JE~bzIvb7h@Oa ztlf+3oIT9D_bLTP7f{ePwS^L(iwammw`L*2oP@GSDP7||^Le94H$M)LG9pdsG z2-%Zy92sG%jMA9}E|#Vyrb+f5`TpJ83)jTy2~8(y1B?A>n`iCSb3+g%uF55qjpj0c z%9*&G#l%?;iE7cEoasq*PO^mR1&SfXI5{N%-q6X3dx_lakgF+|)m2x2@bvIZEQgdsR|LiS!*&$42%oys&bWWKq7{jY>kI7bl2M*Z zJi43}x(SD%2ITEs<}0TM&`(irK8Zw2C*G~Dvz>|HXV56wTcg?mWfOkdw^Z_KnGUt&P(}wSHPMAbgsT>}jgD{JgVt&u z3ent!=40*0_9Tf-WydI)yM@vc#RARGo~S3Gq3y;)Uc;x9E3%8hTCzFchN|5(mTFCG zPesP~GD`=Y(Vd;yg3OC2ZaGzN2c4KA2J^qU&=q7(?tT);))MQfjA4mB!f{Nm6?&q4 z-agAwni#=ep1_{nx4|I6)cdwNQoYSyk=Edeu=R|_gF0LmMkzI_xZT+W(ZMGtnsLsm zGSYoJ?pr0Eke2=V=C|8Y&oZ~oG8*ebc(E)*xv3s3Mu1YWPUJwxGlHVE@L4IRoJyDP 
zXRV-(ih`|&;_1vUQP-xp8}x^O6*?m=nn{}20E5H8N8p$Dyf`(-3AAa+ zN8yzrEv|8fH7I660?pr+2s)etcj&Vd-j#Pea%Wqyt+Y2>LoxJzM#vj(xpO5s$$P4u zF8#u*h5dCBuaCUEV;4;bumHGCn~c$nUU3KYTvSgD404B|+4@*8Y+}oume=B{KDy#L|3ql7WTu1W z^Sg@WX=*1|4xLbl!Cmn7*^d3Isk5aK%GqLbt+|<#kYNr?nh#6s#yjFsR<357=CeGq zRNNE8&Z;ZU6~jsHB_~zwh{x7kPbY>f$FIH|rYsdmTS#ruo)}%Edf3ZUQ9)C9-I}_L za@f~u&HX#3j@^pn3V>s=3{!{4<8_D2;@ee2|Bt=*3~OrJ+J+TTY>24zxMZ_xJbw$;Fj~ zl{rVf$34be>s6KM@@e$Cr$V~ZBtBMDvaur09_pXa0Zm|UdYE2s=X0{5n2AVj%>Glp z`e*F?ACUk(74?v)B}(-Ce%o6kH>iE(WUAN2x!I%(wZ)**Fend3`fyA&-Q?S}pEBf* zl&LnzlZ&fXfx~o^(D*6`a2EIZX56>0t!$d)@t!I!POeBdkOzmSB88j;CV;4Ca{eCYq=ULzl@ zIFe1~#W4$G-Bw0iyKpAi-)29HvGX-F<9YjK(>%Kh^u|`>ta)zSK+F(+*1!1>@OVdO zgYxQ&S|H`(*q!bgY{qS0q@te~&w-JS6l75v3UW?L9 z5{~Ax!NJxsewJHNqPnZ8!#47P`Vu@!F+;?!T$YHzCL7Vhas|}xz1UdCe~7I?=lMaWWHB`!P66B(&1X_kJ;KN$70 zVvKmyn7Apm_=GMTGzOATd0ZSTcC!9DUe#i#Dayl3^`i9~6>Mk2tu72^7NrTk z8`y4;8^wlt7+?%W?8qNRvZYRGYNZ zv2p@3fP=J+xKt^a4ih@h9yD4^2?f<)8hZ}H2PO|w&Yo%Vn38s{-oV zFt!l*CK))pJMxjBP$-I&!1=85K~n5m317T#j{7|B65-G;b)26w;R3NwF{BTaK&q1i ziJS6=`yLMmQf(j!r=ODKCn|> z^OncE3D0!-1;WuxH(bZ%Pn5NI*|3f?FOw9yl)H)>%6vN#P4e>qzU-^7(|A~W#DE!C zR_-^uwfp38*q;ej@&#FZgf)kjzVghZ+ucPu#LoW4ESvhN*nM8a1J3~bQBjM1pfvqS zndpnNNltTEq=&^}l?U;Kf$l2XgrC_l9Vu}bNOK(c@%N+S9ygxeLKN3pt(2>_tji&Y zW4g6>OcEA(E&vactWcXRE6`sBCx32j!G^|WuPZ`c`8=6oT(CW9fPcZNNwpU&vvd%$ zm&4yCA3Ib$e2duKNqmS|w#v{_GUqDL6;QcZR25+qj^6zvQCo(0@pYqX&bqBSNpTBj zu|)KO>k9mrs^k_9n~PCwuge&zl*M6uF?`dEZk6>4fa&D+h`&>q!M{3Nj*ou}UY9Q0 zkV4P32CGsj6VPM`ScFZ}8BC;N+kuBAzPdQfaPX*oh3M07^`tCj{p0?`%?Yju5Ze%YVwBj0nyjH9Np(Lhl|%xPWl9kB6qzXqLmTf>zV-G-sqVQ!AZe9Ssgduk3l zTT`}?(|y~^%>lZq4!D_G69t=k(~Helin$!fYOl`y2fnvFMX|bMK?j%Q-ip^{2vx-u zT%t&4F+ZotgnwQm!*S1$R@dvXwWPnk-izvmss1RajMf8*O@eM?3l=k+C&o|;q?u$R zEBq%qsIQiTAS8{RLk47*NuYMAB@EW_2Aa(6M%75c-bS*4Y)VrP0&kM_Zqf^;VEu!) z75(1lduyEck!~Qw?Ul`vN??o2{qLJbT&X?0946d{0|QYJ7A-0~(=JgIgh;AA7ZMe3 z0au*@zI&IUt>G#5UTzbYgSSMcq-*OnuCNzfI44Af)>_Jf#2s26(JouJdtcwxl=zr4 z@?Yw(g-K+r;TZfsONbTNmIiC!<}#u?vGvL${1k058Fgs$F7Mt>c>@vg(uS&rqlmNC zkg@)qSO4;hdSDGfJnE45>2G=6*Z_odlrs$#Hp-jtY6@Z!ET zxy7vaigA3YHR!3KS6Ur$ky>=b^fPYXjuhS1(`Y8oX;)NoR6pspjf}@*u>|);IlPte zn&Hj;f-{>Lw-A~cjP=!Sfld=@ycn&E>X$mFs6%v-Rb_#tfuwHYo)087nZ?$-xpK#h z&;^GV7(DZbJ+0-tU5=yYkNzZ{T#g4UXo9O6Za0Yp&G? 
zH2XZlP=u?HA*9dCg|AvQDu?FXqfQdjZWyD^Ea_lnIpR3^EKtuqy{L`PZu@PN8c!Xz z)2ncLK}uk+8@?XkHK11%<6I`%H}2|x->mQ3hZea-^WRHP(HjS z{SE6%({KAxn@KB905%&pj5CL$tGzuStj4qqiuxkYUm+mIedws~zmnygRbxAUfL_ZmI0y50_yQNbcG4bX zQ@`#L@Fk(=AO>`%?4WzlOH>7(w{$oZYu?xndq@z6GO}5|$Beyq3cFqjahHzUaLVuR zHzo;=^k?HauA)LboG}vDSGJ#xK6>-sDIdPDm};b@Cb&Y3@6j*RqMS22C)~Ta-!)!_ zT&p6c^5{)-*lpG)xQ{TTCk`B9!OdD2M}Sa5PWdnCtN$$qPgnGYfRLv27aDuIXhT7l z#~Y9HYlNP)!Y-ODsGwF1Y4Z>AlTRf~rFOzOdf|v(nC+tb^S9v1ReLEhxKo5hW%fqw z(As*9|4NAJcyZYVoi5Cg1?3lH?T5Pq_V41GgiH*!#IwWst0-+ozCJB!{b($5iH<7I z|56Y%z({9q`utk{+r^UcM}h7_fi~~&A&0zt%Ek!%UA}4~lYTgkk%_~hS-1J$(HsPc z{OSpU?&3H{SlxTsmR;WkJvxF}1XQ&vdg`Lf!eY3uV8^oMaT0fu(=ic(K^`P5s8q?y z21&2^tLYdwT6`?AM2xNt+toDWVK1N?s?yPSqstQ(hCAdcqM@K*@eSryF8K!sTyQxK(?LSpL#kdvEl8K;{SR{_oaocO!(q z&$hsQqRpqDQ*3c3UV5=xTzuq_Ks5ADa>?)EEay}aumo@j7?c3xt6YE8z4w-dOS!Xf zUGg`wURXPfrHaOu)=<2a(gw(O$Mgd|wR!uclTlke+w-}8=27y|4tGaHS@|jTRi~+J zIhiWlZ~(E<;?Jf@wNJX0Uc33aCV1sljq_nuP4?pNqgO$-Q3oq8zkJxXs;qH7-YZ1O z?51+z=x=RgN}8`cByGh?dFt&;87LrW^SR&(xpA{l?3YvMp%A8#&l`#TV~{C4ay+Tv znm{DKc-Is*dNcY&b=XLMh(E{V`lhG}Ybp5y8-|k<2dobgRW2KQRxZ_~^O#&EQz3eq zTt(DA_#0UP?Wx$)(!0hy2&P*sNv_F+uv0NG==qXUcJSH=Xh8!9OeTIlSSD<<7@~pn z@@eYiyIjnPip_{uZ0`)M_BF>EKq;k(%*)e!As7EVpM|ewVqI$<_iqn%PPPh7vu+JdHo6u{0#du(6%A=Z zlvQnj%^@O3EA7#|$r(|OGTx6}j0_?&p`1x$NSVDspAo=_)H7K$tQhLNl;I`lsO(~X z-h!sKK^PZBC#(q?v2nj7nvZwJK7F9*n%oRFcW-oyrAQ3LfJ9|HGKCH;v*?9DC{!|y zTonAnST#{|gyr}NAl;|fW%e~KxC~CUcej2o)aoTrZy=8Sgj^Y(vFi3RzILbIRN$to zM{DLFd*R2d59%-fp{vFY&>TVMIys>OQ9c&pXj=)6hu_iJZAPIL8FzX{O6 zf7-X(Ym3Qk49YI@{7qx544792dv0A;|9ke^kTGQZ>@%s{-PfwN-g`t;_l}3%<1U#q znAaPpgX4A@?n26r;vQmQ;S1AU@Y-g~pkd6=xiTwIZzr#lBU4jr8zwbArz~tO8s*$y z`>uMtt#SWI=LC@J01X3OBoFoKL~6I zN2tbwnVEPf)mJfdl`D}J`FxjGYoctm8WT;^hKcLC-F2PtaBSYtytZ`h25gIM8s?$8 zc$(H4)V>o|4c&;~SEo>>OQ?l7Z^yo(1{g1S*Nk)zI?W;z+mn`5EV%dY{C69=$>^)q zp-3QRy7jt!eIgCAsVbvxPGZs0mv5@m>$k-%eA$ZH8)%v{@Rjq~dXZeL!Hr@}UNpMS zE-Z6ErH}y|Kp$3>-==++WXi@kD`zHKfhEpoyz)!$Q^6(Q@_0N!_PjA8{qx_MED#uJ zZ@K7CXywUqP`#6j64JWhPIhG!BP!+tvE75LMZSeb-3f)wG+Tqp$dv3;c&C zctHpNkw@|AFx`x7ZP@Rf%&3&xSD`I|lzdt+j)H_rnQiy>FRR?1I2Es0?7ZO@JT>E| zyxQ$t3<1|W9=3{_tVhGDd8VnO>XO$2vjc6Wy_}#jg7C~{UV;loMMrBIR3ug>01#of zBJbaXi%bwa8nJ^CON?GU1WVr=dgH(Y!+M?_9z@DtNk+1kk*}t=5tn%|wUI_CQ^OCT zbOm9!R)Z>qyj|E7Pk3wpl8a?m^)LrPL9+uOg5I}c@(Y}Oj+*h)QZ5tsXZyCLisbvv zusLT>G)p}zRUWx_3Z-G|(Q+F$6bX$6nL6k1ZUmw->OGw%M%g=GrMjn?R1%jz9tOr= z^z~*vo*o?u*ooj5zu(!yH*G|Fv9927Fk;`q**Y0<{&|$Btgs~aDRX_n3uNndo8uNIOpied68($Yw=Mn@ax7=ep90SR{XH6MM+uhd^PS=~(f7L@v$vKy?_>Hf1?D+y^ z`fzf(AU%Rs+{d{GR_jRzKWzKU(QYf~Q!MnBe5$eta4u9T_M&KSZRgTynsmyNaRT|#w7B$AjZiQU+x3T$KbT%H*enEn?nlqjPhDw+0 zK94tmm!TePYm-0L=$9AQ=@OheUK5!uersO;m4=%6;5BH?TB7<(?RtEqg6?|ciHiP7 zr%JGD(*j5JWQpeCfKE>VZ<2E+Xd1HC!RK#qz{o6@WMoy+(+fWKLA#Q?jy?LZyu}$G zeb>wrU=tSHILK-Q>vF#%P_a$w#}%+$8clI;Kdsvh>q(z-1x>Lcdz5?bq5b{k<1&zh z7d8AQw$d>6rlv~)Wo|MTI{lV#6Pm_ih@eF9Jtl6dcPe;i)sK0t9-liUG?}!MJY!Cq z&H8@9SPGz0S1`?7Y%iikJh3aKAUMczZ)Q5U|;$? zm|s5qh^LBz(xr|`aS4Bu($56?j$_!~-hbBR|G5Y>e5#o6o=;&Q3wGK=22&hgMs9m< zJ=t4DkA`l_);PPQcz-y}205|hZfzX58NLphjio8&f<58ya`l=RcsDm6)kFI(ck%1q zLQ7^bB(|Ypv8F}{9<}ZuFF>iJZHVdCL01toKhY*iXs9pD>|Yjv^32%X2h`mEGZv8aveXr%L`hC@vF48VHeP! 
zR(5O7uB^S}3kKNo>xWJ^_9NF{Ukt3v<2&RlNo3)wGfxrk7%#1630({w!p2%OBUf#( z@Y*!G1*f!<6ub&Id*%AAlVU&hMnkMPwv1)kGUeU3LbyW-pIrwuoL5qCOddKsJb%J# z%9~q51n&a8vR^^zYNCWZe>$?cX1Yh6;r_`V(Ks#pmLjT-r&cTdriJ~<*j0JP@Ca7@jKYF?DYU|lj2s2%I$0SAvx;=A2g5c2D zfUS;iuMMS^^>q2hbhQ>10bTrfM|a`5X`k_M9uq2UyaWH3C7n9WryrMiU%95D7rCfJ zJt+Qovx58scr8d2Rt~OMzxB;nr%KX9sCe_%^rbymMqS526?ClQ>hiL3LL+g$wL8hn zHji(5O3WXu^4qhu{0lw+X*#3Y*{qreBCfC@>DE$w!w07FP330E5Dt$Lz8YB|g|k-U z{y{7KX}$?&?09AE23fH({AgSrn{o6X#YqWwb zJ^BhdP}o}gZ5b(^xm+IXYJEjUY&LKOGeZ0bs)vOXhxWkyjHfd)`prqN-$HYxGIROi z)O}Sc_i4ZB&}CtLx(6U1Gwy}Xc7 zg+>X=`<+!l%WO1d?ZL)TH*RdS`b)1lKveIFw}_Zfyo|rf3Ii%RpGiskX!S~ zC=b5)?Z?ph-F>@PvTF(?MZuLLDHC~{`QzM+=l@WPV(x`?o=f@f-CTQwqY-r-<6KAo z{OCt3PJRk>m`?qqQ@dLs*o_wcz{NbP|pb zv&(taDHB@U20yOrkI2wX4tb7t|F^gQwWpsy`VYfj|4YyA&nu820{%LU^UHP{|i@>_4F?vmMVofQ_3c9GRWTsqXkzd6Z)OzZs(c&83OG*LZ-tG@(6j^ z+=%9^S}rKWA^CCnU#@W34g!=;k%WMEbzWIR#)pK( zAsUA1tP6CL#*9Wj9QJ zpFoq0raI%DZ+r$@+C6y&=r^=mlH=5>DL%5k#*^GJ8{uf9ROzddRf7#VB8`iPj+$K} zk`z4oT66Hr$a!X;>;5)J_89IzXRrQ8BV%On52gPPb6yWElBs@5qW{f2)vo{(wXZ&& z{=vpe9a|<`PIy14Lfdo(j@r_=;yrHJ8ey}!Bx0I zB+`nv7w2=u=Eqsn5Zw3KEz&^u#UI=$>E0J-O+O^F*o+f-2x{i2(IP>b+Q=c^)xS_BqZLm{I~6EfE1Ul@ zod5oZAvF*?_}vTsrb%unnS|J*NX)a@RuQ01VTG&|J3bDfrrPk+%{e z`II&a;i#6?u#GZUSurnLT6(8!{chUPAP40>&0EcYEZg z{e8ALXDIePf|&+=@KJA2IupMC%eo?YM1*A*{i!4EOtR;=w1mH0KBVJZn&FB%UCn{I{mksBaGv^~k;d=>S?Uh7sKVEy=drQ- z5jXfxu<+x87XYBw$=Uop9iTj^tQbUA7COpz#{1jcj+exJbV5=8yj{Kb^ zoxR*mehH|to1Ip)8;a6Y7iS(6af^lip%}fJx8sx_Ke2wN+bfZ%GWFPZfx*s$A=N)7IW9Ru|Nb7bm;MN3+<_3vs78RLqH4*sa3tW(5E)G!aMmiRA#I>hg4 z_oCaPq-rMKgi+qgoN@N(zJr5&Q}tZF{U76Tx5E^LHZfDFJ$N{&{up;vfV=g$1;cOd z-Y4en{M?tg#sOUW@?Tf7pJA z)s5S%CbS>pqlRMsK&c_%tM%`O-4C4oy@YLXRWAklO>tj!>CqbeeI0?%EghZu#S!A( zEZ^;{J?AuR{SC*_=Frl<{b8rK67lm#^Ru+GUl|?i9lxvyw?H#+fkd&Jb`AduMt>~m zA9kxvMb<d<0HRSQL@N zHUuj`#r=Et{?_+#^I3EAsg7-hP z&Zi0hvz$rMUp%-0xZzno#~E8k@GlC~@GYP)r7C;C)_-NkA)Ub4>+$-5@t%Jd#sB(F z|ZNy+H3-~NVIN80uL5lsNOYHlbQ<(AnZ?&ZZw&#F7)tLtY1VWKGzxW zLG|_ZnKv9vIkK^}nQiVzMT-#SXML74{5QMG8tOOsiP$?zNpNqjn1&OI)SLXXRvh6&klXCUAEpxA&Dncm*0Z6dZU*sihB zJ-*Mow*gnwYEJMi7s$jIewAjf2pxjg>GH_24v^;JeTZZ3-al{-|H%%3L(r5ohO~D$ z4LVNwF5hKUC7*eBHX+6C%KJN7ogEX`wHHgu%F0MsD>l3#Z6$2QO2|FMu`+3wB1e<6 zl5gp#x1Oj4N`>$vsZUA?Mz>d2=nEvQM6bwTmn7PbFbjl+1=G&S7FPm#du=_iVZvB$ zoNvLD3H!ovO`yxu&^cU;_+MUmnMX zA)LF?Y$RQoMDm&@7@5pvcE@!N8EET)a~*|H{R^ZlOI zzh(EIGdhsZ?5>5pm^$zNB5a43+DA%2BR9stBI$x3{cIT>kO+$TL{2cPK+15@7je06hghHdb(MNLHcPNUI82D z&Ms~f!L_7onQO7)xfzsUK%_G38b0%*Z=;kc)+ zWdqu_`${Xo(xn+vg?X|KU#V!wCp1^b^Y8mPF1hv!d_44gvW=v#6tA9iA%_N5YTr7> zI##qCY$I=a^odQ>%y^>$E6xol6b6}rQtN{@vhRh72OU&r1|pf}wqDFb2)!Pd_38nl zUxbkC2%+?NwWPuc)!@GDZ^@6O%|OD_;yF4|lCrYL21CRgpZHG1vY$f2fVccwc(3EM zCA5uoH^{rRNIG{W*_Gh`i0J5E_BrtHOS*O=y28HR+HtwZjLiez=V03aor|*Xfvg>6!%id?6^ZPXd7FC zNWdLyk@mDGEH9Dr8XAsrl0Fhshy!gvFsdl#Hr7j=F!i2&vn?CM#aCe5MY;<5S{&M^ zKQ^VF@Sg(ozx>>#6ew-`*wdjZ^vR!tc;Ryh_}jF%ay(@EL>2NxQ=xk!G~7`T5Lsd zflT-MtmRbe;8EAID#zG zt)QJZfn$poAJckDGj#G=4yj>(}WM2XSV;d117(pVeZZ9-GA*;B~J6${Gi0D^Q zQON`uFQxpw;1w-^d0L8cbyg=&vkv(4HaZcMo*e${RO4-!Y74v(k~hGC9}9C>zI;ct zHsndIo=tNQej3#ZPFdF@T{fniQ?SZC)3m+m-PR=btU1U(1x#o<+q0UUAMjp@pP!$^ z{j9;uK;j~{VD`IDRR|b&?lP6&qpGz0yJY5u^i1JK0|Sx^5i%dV-uV$-`728qKS}+) zoGloEI(8&$lonVq#dW(iZ$|{jmz9=UFt0C5h$iY)R`RsNl8YWZ-bw@6ED-kM#T+Az zaWQY$M~adPCPKhKhFtRdSAcjIQv0Qr(;#a@oKo`Jue8>q}(*Ih@6jB3yic`WPi%8o{!ce`pdmrI~ zWy$Bp^X;$LQ>IG6a#isQYYt0t6^IqyX)+kj=Q=Y{%kw>>WM-TgpGlA$S%Rus!+}8? 
zZVZ`FQhO(a?!JDrd!kz!11{w)KYz*9lfS=thPJUYz;+mz2YULe?|fT*)w$bLjhp%f z89r^-1%#-hk5jow?; z#X3U9er+O;b=Ppe&Z;Zwf>mk2zE9QM)J5o6UHVVt^?xwd-vO689dPkF`(kSCSZ--K z8NyPgcdonJBd|}_oIZK1zl1w=A7;omekPYD_q~b@bkstE!*1*O6DfP9A>a_zxEsHS zyxPcqAfLQB-B>yI(vBSz$~)7Kmhz(0wxw+;Uf%A5w1L*;0(Y=AS{_wBznU)nvI z+PJMl{(8*GQEEXJb=WE46qVYhZTcHoB=KMiR}#*IlkF8KGPSVY!FfTEk4??JXFaiD zyKlw_N*2={V-sqD#3=9Ph2K|pZHw`dQjj(xa35ZS#Inv^0A=mk((3_2pm zS5sq)991ig_6f-TME{oR$Usr-JjnUn$A_lZ&OfK%qTwTS+#6bi7dzGIrVAdSzkK=9 zWf{p-A`|5q+=}xn-}MV<-98HTBtL1xaNxGE8|}FfwM&u=+W3`QE?L+BhAT3y?$7ni z#UZrG&76drsJ>_}lMlVE@dQ$JNU(ps%+n~%bBr{J@5FS;Rx4J%YKPU&)z}VXOei4> zI@fylSWik~UgNITE%@O?Yz-ilj2D9G?z#V$y-mF9mqRi*b0Gx4_14C*{)lSfzP zCL2B1Y`xYex4DESRw25W*T30N@`bBw8Qf2Gcg)}U>R8^8ud6e@mH72GP{qJ3_`NNV zZ`%v9v$IK=Eixf*awM4)%n%vvZm6*HJnVDxfv#sA)UPQ~E0Ld!e8lnWC{=J z=#e+hTj1pk0bhBi-wZ}1xPu`x{GOW|=uhdw9Mf3^nL&PUxD*`Y!s!GvxUELTX!9~h zZ50|S+}d9B*dE9vZ&#pxKX;Y(kcErtB>82<6Mz50ZUTtP@_wgNSJQ%T4>?5@14^Zp_Lavl2G}^q3x|%4cqzB8t0@k9 z3AxXflT#-=P=3xUQhTlpRr@)@LMXaIDV%neBXHd)L2onn!Deo5vv&fRS4MR)Rvs&% zwl!ul(NWF!8+M2{>LxVNKjTq_S<53W`6KIIk@KC86qKlcWeX=l(r8{@vxuvfNL!D5 zn5C-YTp3GVUi!MgtZ2ceh-1kjySR}=Js?(7+B>|LM9!&4kzk);VtlGAC=WlkF{~yC z1uR0}RG`}L&CWn1gUHQte2IL61D6hVt;GpukIs#slU^My8d2X7_uJKpPjD39A0Ml+ zAMrU>@fq;$EZN@U3he(tg%t@0JQd(YfOE4$}-*n*F@>l3&kGxggoR0_viHcfFJ>xgxoOvJh#=E1C7 z87?e>UTFRW1zW&vzRI$k9Ltw)cl)~!UV1v+ZAxxL{mw|Yc;j4>(c;yJJOT;)b|g;v z#stL1TxU4-7{^l-evJJFeM|}jgTdZ6tB=7q@di^edMOyb=O8|O`lq$qzJiAsD1*-9 z$HJy$*U#}(Kblne1+xOaG7q##`7V{(kL1(Hla6=8CPNh5a66L4-9C?!8_+|uP2N}! z0Y0cR+-qZM_l6wuv-@0LTEC5c%frh9wr{w!GECEBZDlO?Td)aWqz_);ft_7Z5vvv6;W zo)>fWTwH+T)|dBP4_y{%c{5Fa%{@+p(4Rib7f!7j{!knF^-BA2ySx&a#~geF2}c#_ z6fgjeXvV$ags#yz5PRB7V|Y^}HT{O~TvmQpn)}XnOJSiMoR4$0Wq&hn`>lis4VqIe zfti1Qw013STMVJuRf*E;cRg+P@C4EcMC-W#WRP03XRtJ6t7Rb%z0!?C*U|`c%0nh_ z?V$y_DeQ*aXpa@$-~03*`e9cNkrqnGoeU-qT0OPhsAV>8jb7Kh@?h!w?{OZ{;exI8 z;j<6UDev^_!}ZAGI;X)qP;;9m+Z_b#H`%eD|UMlGJ;H4@KPC9O_|s9I#>|Q{r;xgdH}wDWw-qyVB?BYt{y6-5&*v4N*+dckOCx6D~BDB{4v;d$v`v`WS1OJm;~ z71)`R=k}O62x4ObH{r9^2@OpfvIoBweNU3-TmVAWY}bmqN|V=ypZ05-NhMB0FpRv4 zuVxqK+vbdF6;#8w?$@}s4`$z!>26`RUsg*&_CPLMeIPr_>0`f=btg2+oi1F#^Jp=V z4gsR-Mu@fEkPDotcg^fH=dgW%I=*Wrt!BP|)5`A~kc0+``z2DWr z*@n+@h(C*XB@GK$R6i%M23_u|7F-seY4+Xao!jR0EG-1;{BEDsob}+-*6^0d=g*&? 
z;ml-|ujqQUG8?hu<<_rXNrNewEkCFn zl9o|8i-$2Uy&3LbwP0$f>x^q1|E}gw=dRv_RP9bmi8@rWHc?33fUFTCHvOishODk2p zY_Q+<7tK+T-6ZRlEN`&;87kI*O7e`f-|m2=r-!J`&Yk0%y5)PyTKQdcOV_iQ3`y5{ zDmQ1r2T%KaHT}VDZ>lRWS4KYxCy758Yvp@oBn6qQB+}5(IIE2ZX;N?VCQN9=1wWsZ zzj-A1@bVNHciS}gLNfxETUO_c3VIQd`~HgVIO4PS9H;pJ!*7JWs5f2y^JMALo{f$} zIZBN6-mi7S_w=yKnZZFd@0`7MeEuh4IT%|k_QWscM#~GA7*vQei9^n|07&o&qd0`# zPt0k?kTm$-13ldFU@Jg1g;#+qc@g1AG(OW}1_C7)nYXoa3kphXOxH;We{{VIeYu~q zty{hkn~DFR`D)+DPf*y{Wo7K$!>c3t8cgYE@zPdZ8we8C(VMzVOpT}IH%bbnl9$Xz zl$th;2ONG4JD1Lcd01vtD@?)aW6@mjyvKG|vLezo=IlI><}KujhgN_$r>xpOKW0+i zug(GT=u%ebC1Lz#lk@el2_H@9L3WTps)TdK*7A+vzP?NaRJZ#4SXi{={Mp=rnFq$D z0|^h7@rzx%%xgorDm|?faOkv+JYi2~w@(6<6v-sh^Fz}}-%Z;WfAJ%VE*k_rZ#hn5J z*)Q+Sbhk;R0C-VgFL~TM5bZNQ;0~<*E{X~V8kZISYvi4$?mi;DF=-1TFkRWI=DqTo zJmqbVf;Kz`o3KL#^W=zsBdqpL9Rx zqf$BRF_y@0Y}E=^rkBQjb15z{AHofrNYAHv+6Ddz>SCNu9x?Rvrb&%71BBiZ9@7?$ zcH3Va6A5GWD@2c#BuagYd8q*J)SXC^a_=>EZlTcuHN_Bx-7*6Y&eblw)zLgh$QhPr zg(?g=DwpimjA-ss@~Q0^<>lpN<7IFwxqT10(yc`cua8YnJGViR@@quo_CFmgO>hvc?YTHXIT4=og*EFb81>+DDF<0ceAz`+8PGeBDq)Q0uR zu;NN{c{a42ypkWn6%5c=cNgqG$8p+Bl)ogIEH(aqhMvu({basYrhL9}om070>=nkn zl|XUy5#was`oTsW>cyCx<)@d03vxrPh)=uh6RmW|rEeITWW#cTE%)3By&?li6XwCQ zc8a#|pq&v6*~==}&DlPH{eg=Q8 zhXTcK!GT3QvO&j(Vpl3XipisA`6mwZtiu9d+-Q6CZH;#vbr8rNd?cf8!-MD*J*9_@ zE3PwIK_#V@SMPoHVY>+}y)^kIFn!4??)dJ!%Q*pK7I*Nq`~SqskJ+Y)C)ybSD_O$7RZE5kD`}ZmU{K-gpNPpWx@o@-1PGwy%DrbZdZ zw8K=BpIE#l5A~#}D{_ncJ!kUUOx_OTEl(DP(Ja`tqx{-n&ZHeqS7LQR*~Sg$2(IWG z|KqIsAwYGV40Lb%S0bo!;j7l-rmc{@m7#0~p12~eWOq;fh~VP|wxwX=a&Sa{z#GTZ z{%^S=N~XbteIe2Z_{~!rbq}gFTaK1e#nM?{E;NYuj|g3ISbac{MOWVqeL7FcQ@u8> zsGXj^`hv;n>}j>BuAB{9ceYjc<@Xe9qSB$%#t<^;rbPJy&-_pKpY=2w905&`UV!9f zId3y0l%QoSok4N5)e_T9|5Zx2I^|6G9MJqcM~KK#D+h9P-;6(|<&mvaNdwK>zf{QG z6d{$!gq^SXa@fLPT$8F9FXy0jUh}}4r?6z0^(T`f#NOmM`Ic@{i1pnNb<*>J zR_hyaC;atCpJLlntkn#luWNYI(5Qh={HK@ zqi=7tBKl{(&Uz?wQ0g}B^zcUZ!q(-O4FKOE*2a)0ITwbZp0Z7}W%8!>W@ZVeG8!OJ#S{5n3 z!z11Fwgqjg-?il&w9AxR&7wU0Y%mOGa=e>Kf_NZRkFcqBb#G#=k^`bT{O|294@gAI z?mU=Ga#^(>e)rH$b_4kFY|sXysNH3rl1|)kUWx*4g#(3QOW%Kx36hvAB;B~u8;Lo6 zb#>Y!NbrWRu+OEj(gQ%yzw?)r*`gz&8sB;Y$zOeKyezWct3-jz;VHTlRDD-rcd~X3 zt0XR~vLRj-{c{-?6G--obRpXam|E=f)o}oqN#WFs< zedNA1E=VL$4sVtoX#Y_dIl)jx&X9j{C5lVrdvx!pWVfE4fj0>hpVR}G!(yr3UXe(mo5ECn z^*g(#Px}SFrf)lJ&NL_|_KkpEiEiB((l|BU90Hze6kSoZ-Nvn%lNfQ}Ja?0d3ak&% zX6qPx!7AtX&LjC@fm{%#Y9{l{=e{&4k!u0q0iJ^Q;NT;K`CUX1Zj41OeV7_nug2}zT-`x2!@UKTRjk>5+n4k2^VP}#ObjC4klukk09dv!RnSo0VA0DZ& zEUHrQzsEj1cUr$-&HuO9?aTHvHq!&@e$KV~%)^;nViuzrLyw7lLqJ6^lNPY^`N=x2 z>p~Zcx58jR6e(Iu=;Qj+3_&p~xYV^izTA$uD4fPUe-nv^2K*|(6##;+g zqIUigv*his$zN>-GN9|=g#%!O!@Y;Qpq>n|N7w2X=y_>4!Er%0eS!UP+iI`YcO*3V zB>my)lw$>DpX^nyL9Se<*~TRRSBbRv?Vsmw!tWpi!=A~hM9@=vnw+kp+Mg3O)XWTr zIGs$lSGR55JtOLPd3Vgf94eY6IcNvvCqXSsK?O^X- zUwF2x4RMfeS{=@7eEA_*f>ITGfd}t=?kaBKB<1fL<7KJ7i}40`gGaS>+N%QP@&7dL ze|XptuQuNC%(n_T@2DSrPKqz9+W&jw?}^$=mY~D0OZllzucZomZw{Mpt?`Mp9WM%g zw4oaTeIf$Qh3SqFII0vRA_5dsl{ioRT9e5rsU>>%P6pe0CnejBp}cojX6O9Y_$P`O zZxy!9jDeKXG!mR8q>a^L;3llgXnKmEz%_cm#7fz%_EV9^+mCzifW2lJ(+Wk9=!P!!LtA zjf8{gK4$_^7Es@Gx*q zPqqPqp>M1=wsU+E(K7royBz zHCOZdo}#?cXmLiXXWuoW$MtfN@u;?K60FW|e>EBC4Ro$twEhgCWD+kGdMWlGuu!cL zIXzxxd=IhZpnZNinB2olDgEg4oT{p7ujhoRZHhS5!IrD0;$*+{TE2irlCv>SCPl+B zklE|MNL_21f`Jhx&s!A28!pBD#q$)|PXk0$-icVApHY*sVxsIQ!>8ud{>!nDtd}|k z{U-T$I1cjPa{~lGZS`{8F*nF;vrm$}VVE&VktLDK3&%1S5A*=^pgLf#B zfqL|JnBT#=J)3Z5FtK3>OdVRe`0@X1?>*z1+P3atr75DQsGtaf0wP`My@-V>NG}OR z2pthZ3nd6P6hV4up(q^;NGQ^=(0eC{LFqlxg;4%0>N!`>IX?HiU*F&LlNz$wd#_o> z9COUI#5Kmnzszc*sR^|VrU#|Q);DN*A_JZ#uYM*KA0aYp2L0&SNiCGMrO-9Q^(+ot=<4o-`;n>@zByci`~&~~cm`H9>6x@qWk0Vas{kd* 
[GIT binary patch: base85-encoded image data omitted]

diff --git a/website/source/assets/stylesheets/_styles.scss b/website/source/assets/stylesheets/_styles.scss
index 1ce4a566c..2acea83c7 100644
--- a/website/source/assets/stylesheets/_styles.scss
+++ b/website/source/assets/stylesheets/_styles.scss
@@ -98,6 +98,10 @@ p {
       color: darken($green, 50%);
     }
   }
+
+  img {
+    width: 100%;
+  }
 }
 
 a {
diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown
new file mode 100644
index 000000000..d88e492ba
--- /dev/null
+++ b/website/source/intro/hashicorp-ecosystem.html.markdown
@@ -0,0 +1,32 @@
+---
+layout: "intro"
+page_title: "Packer and the HashiCorp Ecosystem"
+prev_url: "/intro/platforms.html"
+next_url: "/intro/getting-started/setup.html"
+next_title:
"Getting Started: Install Packer" +description: |- + Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools +--- + +# Packer and the HashiCorp Ecosystem + +HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem). + +If you are using Packer to build machine images and deployable artifacts, it’s likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure. + +Below are summaries of HashiCorp’s open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow. + +# HashiCorp Ecosystem +![Atlas Workflow](docs/atlas-workflow.png) + +[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul to make application delivery a versioned, auditable, repeatable, and collaborative process. + +[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating machine images and deployable artifacts such as AMIs, OpenStack images, Docker containers, etc. + +[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating, combining, and modifying infrastructure. In the Atlas workflow Terraform reads from the artifact registry and provisions infrastructure. + +[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime. + +[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf’s gossip protocol as the foundation for service discovery. + +[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production. 
diff --git a/website/source/intro/platforms.html.markdown b/website/source/intro/platforms.html.markdown index 75fcf6721..d97756fd7 100644 --- a/website/source/intro/platforms.html.markdown +++ b/website/source/intro/platforms.html.markdown @@ -2,8 +2,8 @@ layout: "intro" page_title: "Supported Platforms" prev_url: "/intro/use-cases.html" -next_url: "/intro/getting-started/setup.html" -next_title: "Getting Started: Install Packer" +next_url: "/intro/hashicorp-ecosystem.html" +next_title: "Packer & the HashiCorp Ecosystem" description: |- Packer can create machine images for any platform. Packer ships with support for a set of platforms, but can be extended through plugins to support any platform. This page documents the list of supported image types that Packer supports creating. --- diff --git a/website/source/layouts/intro.erb b/website/source/layouts/intro.erb index 17e900baf..127d6ab84 100644 --- a/website/source/layouts/intro.erb +++ b/website/source/layouts/intro.erb @@ -8,6 +8,7 @@
     Why Use Packer?
     Use Cases
     Supported Platforms
+    Packer & the HashiCorp Ecosystem
From 6d28ee931cf9e0f6fc310f340f59438cbe15e44d Mon Sep 17 00:00:00 2001
From: duftler
Date: Tue, 21 Apr 2015 14:40:55 +0000
Subject: [PATCH 058/956] Delete GCE disk on SIGINT.

---
 builder/googlecompute/step_create_instance.go | 23 +++++++++++++++++++
 .../step_create_instance_test.go              |  9 +++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/builder/googlecompute/step_create_instance.go b/builder/googlecompute/step_create_instance.go
index e572c441d..6bfee5460 100644
--- a/builder/googlecompute/step_create_instance.go
+++ b/builder/googlecompute/step_create_instance.go
@@ -125,6 +125,29 @@ func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
 			"Error: %s", name, err))
 	}
 
+	ui.Message("Instance has been deleted!")
 	state.Put("instance_name", "")
+
+	// Deleting the instance does not remove the boot disk. This cleanup removes
+	// the disk.
+	ui.Say("Deleting disk...")
+	errCh, err = driver.DeleteDisk(config.Zone, config.DiskName)
+	if err == nil {
+		select {
+		case err = <-errCh:
+		case <-time.After(config.stateTimeout):
+			err = errors.New("timed out while waiting for disk to delete")
+		}
+	}
+
+	if err != nil {
+		ui.Error(fmt.Sprintf(
+			"Error deleting disk. Please delete it manually.\n\n"+
+				"Name: %s\n"+
+				"Error: %s", config.DiskName, err))
+	}
+
+	ui.Message("Disk has been deleted!")
+
 	return
 }
diff --git a/builder/googlecompute/step_create_instance_test.go b/builder/googlecompute/step_create_instance_test.go
index baae10849..f2ccbf57c 100644
--- a/builder/googlecompute/step_create_instance_test.go
+++ b/builder/googlecompute/step_create_instance_test.go
@@ -39,7 +39,14 @@ func TestStepCreateInstance(t *testing.T) {
 		t.Fatal("should've deleted instance")
 	}
 	if driver.DeleteInstanceZone != config.Zone {
-		t.Fatalf("bad zone: %#v", driver.DeleteInstanceZone)
+		t.Fatalf("bad instance zone: %#v", driver.DeleteInstanceZone)
+	}
+
+	if driver.DeleteDiskName != config.InstanceName {
+		t.Fatal("should've deleted disk")
+	}
+	if driver.DeleteDiskZone != config.Zone {
+		t.Fatalf("bad disk zone: %#v", driver.DeleteDiskZone)
 	}
 }
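The cleanup above kicks off the disk delete and then waits on the returned error channel, bounded by `config.stateTimeout`, so an interrupted build can never hang on a slow API call. Below is a minimal, runnable sketch of that wait-with-timeout pattern; the `deleteWithTimeout` helper and the fake driver call are illustrative stand-ins, not code from the patch:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// deleteWithTimeout waits for an asynchronous delete to finish, giving up
// after the supplied timeout. start mirrors the driver.DeleteDisk shape used
// above: it kicks off the delete and immediately returns an error channel.
func deleteWithTimeout(start func() (<-chan error, error), timeout time.Duration) error {
	errCh, err := start()
	if err != nil {
		return err
	}
	select {
	case err := <-errCh:
		return err // nil means the delete succeeded
	case <-time.After(timeout):
		return errors.New("timed out while waiting for delete")
	}
}

func main() {
	// Fake driver call that completes almost immediately.
	start := func() (<-chan error, error) {
		errCh := make(chan error, 1) // buffered so the worker never blocks
		go func() { errCh <- nil }()
		return errCh, nil
	}
	fmt.Println(deleteWithTimeout(start, 5*time.Second)) // prints <nil>
}
```

Buffering the error channel by one lets the worker goroutine finish even if the caller has already timed out and stopped listening.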
From cd14cb701243b84180f1a496f9aef8e812f4306f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20Pineau?=
Date: Thu, 30 Apr 2015 18:33:15 +0200
Subject: [PATCH 059/956] [Provisioner][Ansible] Added support for inventory
 groups

---
 provisioner/ansible-local/provisioner.go      | 20 +++++++++++++++----
 .../provisioners/ansible-local.html.markdown  | 10 ++++++++++
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go
index 3ab37c086..28249b346 100644
--- a/provisioner/ansible-local/provisioner.go
+++ b/provisioner/ansible-local/provisioner.go
@@ -47,6 +47,9 @@ type Config struct {
 
 	// The optional inventory file
 	InventoryFile string `mapstructure:"inventory_file"`
+
+	// The optional inventory groups
+	InventoryGroups []string `mapstructure:"inventory_groups"`
 }
 
 type Provisioner struct {
@@ -99,9 +102,10 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
 	}
 
 	sliceTemplates := map[string][]string{
-		"extra_arguments": p.config.ExtraArguments,
-		"playbook_paths":  p.config.PlaybookPaths,
-		"role_paths":      p.config.RolePaths,
+		"extra_arguments":  p.config.ExtraArguments,
+		"playbook_paths":   p.config.PlaybookPaths,
+		"role_paths":       p.config.RolePaths,
+		"inventory_groups": p.config.InventoryGroups,
 	}
 
 	for n, slice := range sliceTemplates {
@@ -196,7 +200,15 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
 			return fmt.Errorf("Error preparing inventory file: %s", err)
 		}
 		defer os.Remove(tf.Name())
-		_, err = tf.Write([]byte("127.0.0.1"))
+		if len(p.config.InventoryGroups) != 0 {
+			content := ""
+			for _, group := range p.config.InventoryGroups {
+				content += fmt.Sprintf("[%s]\n127.0.0.1\n", group)
+			}
+			_, err = tf.Write([]byte(content))
+		} else {
+			_, err = tf.Write([]byte("127.0.0.1"))
+		}
 		if err != nil {
 			tf.Close()
 			return fmt.Errorf("Error preparing inventory file: %s", err)
diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown
index 63b98ade4..0a12dbc5b 100644
--- a/website/source/docs/provisioners/ansible-local.html.markdown
+++ b/website/source/docs/provisioners/ansible-local.html.markdown
@@ -41,6 +41,16 @@ Optional:
 * `extra_arguments` (array of strings) - An array of extra arguments to pass
   to the ansible command. By default, this is empty.
 
+* `inventory_groups` (array of strings) - You can let Packer generate a
+  temporary inventory for you. It will contain only `127.0.0.1`. With
+  `inventory_groups`, Packer will place the current machine into each of the
+  given groups and generate an inventory like:
+
+      [my_group_1]
+      127.0.0.1
+      [my_group_2]
+      127.0.0.1
+
 * `inventory_file` (string) - The inventory file to be used by ansible. This
   file must exist on your local system and will be uploaded to the remote
   machine.

From d903b6d56a25df6eaf49d81682e9ca55575f9d18 Mon Sep 17 00:00:00 2001
From: Ernie Hershey
Date: Fri, 1 May 2015 14:34:37 -0400
Subject: [PATCH 060/956] Remove duplicate "directly"

---
 website/source/docs/provisioners/file.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown
index a8d0daf56..68034fe00 100644
--- a/website/source/docs/provisioners/file.html.markdown
+++ b/website/source/docs/provisioners/file.html.markdown
@@ -61,7 +61,7 @@ machine will be created by Packer.
 
 If the source, however, is `/foo/` (a trailing slash is present), and
 the destination is `/tmp`, then the contents of `/foo` will be uploaded
-directly into `/tmp` directly.
+into `/tmp` directly.
 
 This behavior was adopted from the standard behavior of rsync. Note that
 under the covers, rsync may or may not be used.

From 1365627e3139792b59a042b21950891836ff1575 Mon Sep 17 00:00:00 2001
From: Rickard von Essen
Date: Sun, 3 May 2015 11:18:48 +0200
Subject: [PATCH 061/956] Fixes #2080 Add prlctl_post in builder Parallels

This adds config option prlctl_post for builder parallels-iso/pvm. It allows
additional prlctl commands to run after the VM has been shut down, just before
being exported.
--- .../parallels/common/prlctl_post_config.go | 28 ++++++++++++++ .../common/prlctl_post_config_test.go | 37 +++++++++++++++++++ builder/parallels/iso/builder.go | 6 +++ builder/parallels/pvm/builder.go | 4 ++ builder/parallels/pvm/config.go | 2 + .../docs/builders/parallels-iso.html.markdown | 4 ++ .../docs/builders/parallels-pvm.html.markdown | 4 ++ 7 files changed, 85 insertions(+) create mode 100644 builder/parallels/common/prlctl_post_config.go create mode 100644 builder/parallels/common/prlctl_post_config_test.go diff --git a/builder/parallels/common/prlctl_post_config.go b/builder/parallels/common/prlctl_post_config.go new file mode 100644 index 000000000..23c2d5520 --- /dev/null +++ b/builder/parallels/common/prlctl_post_config.go @@ -0,0 +1,28 @@ +package common + +import ( + "fmt" + "github.com/mitchellh/packer/packer" +) + +type PrlctlPostConfig struct { + PrlctlPost [][]string `mapstructure:"prlctl_post"` +} + +func (c *PrlctlPostConfig) Prepare(t *packer.ConfigTemplate) []error { + if c.PrlctlPost == nil { + c.PrlctlPost = make([][]string, 0) + } + + errs := make([]error, 0) + for i, args := range c.PrlctlPost { + for j, arg := range args { + if err := t.Validate(arg); err != nil { + errs = append(errs, + fmt.Errorf("Error processing prlctl_post[%d][%d]: %s", i, j, err)) + } + } + } + + return errs +} diff --git a/builder/parallels/common/prlctl_post_config_test.go b/builder/parallels/common/prlctl_post_config_test.go new file mode 100644 index 000000000..c091a1a92 --- /dev/null +++ b/builder/parallels/common/prlctl_post_config_test.go @@ -0,0 +1,37 @@ +package common + +import ( + "reflect" + "testing" +) + +func TestPrlctlPostConfigPrepare_PrlctlPost(t *testing.T) { + // Test with empty + c := new(PrlctlPostConfig) + errs := c.Prepare(testConfigTemplate(t)) + if len(errs) > 0 { + t.Fatalf("err: %#v", errs) + } + + if !reflect.DeepEqual(c.PrlctlPost, [][]string{}) { + t.Fatalf("bad: %#v", c.PrlctlPost) + } + + // Test with a good one + c = new(PrlctlPostConfig) + c.PrlctlPost = [][]string{ + {"foo", "bar", "baz"}, + } + errs = c.Prepare(testConfigTemplate(t)) + if len(errs) > 0 { + t.Fatalf("err: %#v", errs) + } + + expected := [][]string{ + []string{"foo", "bar", "baz"}, + } + + if !reflect.DeepEqual(c.PrlctlPost, expected) { + t.Fatalf("bad: %#v", c.PrlctlPost) + } +} diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index e5e42e8e7..da3615dd3 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -23,6 +23,7 @@ type config struct { parallelscommon.FloppyConfig `mapstructure:",squash"` parallelscommon.OutputConfig `mapstructure:",squash"` parallelscommon.PrlctlConfig `mapstructure:",squash"` + parallelscommon.PrlctlPostConfig `mapstructure:",squash"` parallelscommon.PrlctlVersionConfig `mapstructure:",squash"` parallelscommon.RunConfig `mapstructure:",squash"` parallelscommon.ShutdownConfig `mapstructure:",squash"` @@ -71,6 +72,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(b.config.tpl)...) + errs = packer.MultiErrorAppend(errs, b.config.PrlctlPostConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) 
errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) @@ -295,6 +297,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Command: b.config.ShutdownCommand, Timeout: b.config.ShutdownTimeout, }, + ¶llelscommon.StepPrlctl{ + Commands: b.config.PrlctlPost, + Tpl: b.config.tpl, + }, } // Setup the state bag diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index 037641619..e1b003406 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -99,6 +99,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Command: b.config.ShutdownCommand, Timeout: b.config.ShutdownTimeout, }, + ¶llelscommon.StepPrlctl{ + Commands: b.config.PrlctlPost, + Tpl: b.config.tpl, + }, } // Run the steps. diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index 83e643111..dcd47fa82 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -15,6 +15,7 @@ type Config struct { parallelscommon.FloppyConfig `mapstructure:",squash"` parallelscommon.OutputConfig `mapstructure:",squash"` parallelscommon.PrlctlConfig `mapstructure:",squash"` + parallelscommon.PrlctlPostConfig `mapstructure:",squash"` parallelscommon.PrlctlVersionConfig `mapstructure:",squash"` parallelscommon.RunConfig `mapstructure:",squash"` parallelscommon.SSHConfig `mapstructure:",squash"` @@ -51,6 +52,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(c.tpl, &c.PackerConfig)...) errs = packer.MultiErrorAppend(errs, c.PrlctlConfig.Prepare(c.tpl)...) + errs = packer.MultiErrorAppend(errs, c.PrlctlPostConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.PrlctlVersionConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(c.tpl)...) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index e0b1083be..46bf96b8a 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -170,6 +170,10 @@ each category, the available options are alphabetized and described. where the `Name` variable is replaced with the VM name. More details on how to use `prlctl` are below. +* `prlctl_post` (array of array of strings) - Identical to `prlctl`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. + * `prlctl_version_file` (string) - The path within the virtual machine to upload a file that contains the `prlctl` version that was used to create the machine. This information can be useful for provisioning. By default this is diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index 9243d1f10..2a79c6b6b 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -113,6 +113,10 @@ each category, the available options are alphabetized and described. where the `Name` variable is replaced with the VM name. More details on how to use `prlctl` are below. 
+* `prlctl_post` (array of array of strings) - Identical to `prlctl`, + except that it is run after the virtual machine is shutdown, and before the + virtual machine is exported. + * `prlctl_version_file` (string) - The path within the virtual machine to upload a file that contains the `prlctl` version that was used to create the machine. This information can be useful for provisioning. By default this is From 835aff456214a7a2aea3630e0029bf4c7c5c4fc6 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Tue, 5 May 2015 09:21:32 +0200 Subject: [PATCH 062/956] Fixes #2079 - Documentation parallel_tools_mode / guest_additions_mode --- .../docs/builders/parallels-iso.html.markdown | 18 ++++++++++-------- .../docs/builders/parallels-pvm.html.markdown | 18 ++++++++++-------- .../docs/builders/virtualbox-iso.html.markdown | 9 ++++++--- .../docs/builders/virtualbox-ovf.html.markdown | 9 ++++++--- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index e0b1083be..ed7ebd86c 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -148,17 +148,19 @@ each category, the available options are alphabetized and described. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. -* `parallels_tools_guest_path` (string) - The path in the VM to upload Parallels - Tools. This only takes effect if `parallels_tools_mode` is not "disable". +* `parallels_tools_guest_path` (string) - The path in the virtual machine to upload + Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". This is a [configuration template](/docs/templates/configuration-templates.html) that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default the upload path is set to - `prl-tools-{{.Flavor}}.iso`. + `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which + should upload into the login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are - made available to the guest for installation. Valid options are "upload", - "attach", or "disable". The functions of each of these should be - self-explanatory. The default value is "upload". +* `parallels_tools_mode` (string) - The method by which Parallels Tools are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the Parallels Tools ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the Parallels + Tools ISO will be uploaded to the path specified by + `parallels_tools_guest_path`. The default value is "upload". * `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index 9243d1f10..355e325c5 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -91,17 +91,19 @@ each category, the available options are alphabetized and described. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. -* `parallels_tools_guest_path` (string) - The path in the VM to upload Parallels - Tools. 
This only takes effect if `parallels_tools_mode` is not "disable". +* `parallels_tools_guest_path` (string) - The path in the VM to upload + Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". This is a [configuration template](/docs/templates/configuration-templates.html) that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default the upload path is set to - `prl-tools-{{.Flavor}}.iso`. + `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which + should upload into the login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are - made available to the guest for installation. Valid options are "upload", - "attach", or "disable". The functions of each of these should be - self-explanatory. The default value is "upload". +* `parallels_tools_mode` (string) - The method by which Parallels Tools are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the Parallels Tools ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the Parallels + Tools ISO will be uploaded to the path specified by + `parallels_tools_guest_path`. The default value is "upload". * `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 020222beb..1a856b56d 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -106,9 +106,12 @@ each category, the available options are alphabetized and described. * `guest_additions_mode` (string) - The method by which guest additions are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". The functions of each of these should be - self-explanatory. The default value is "upload". If "disable" is used, - guest additions won't be downloaded, either. + "upload", "attach", or "disable". If the mode is "attach" the guest + additions ISO will be attached as a CD device to the virtual machine. + If the mode is "upload" the guest additions ISO will be uploaded to + the path specified by `guest_additions_path`. The default value is + "upload". If "disable" is used, guest additions won't be downloaded, + either. * `guest_additions_path` (string) - The path on the guest virtual machine where the VirtualBox guest additions ISO will be uploaded. By default this diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index 5f9d34b2b..0de796802 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -96,9 +96,12 @@ each category, the available options are alphabetized and described. * `guest_additions_mode` (string) - The method by which guest additions are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". The functions of each of these should be - self-explanatory. The default value is "upload". If "disable" is used, - guest additions won't be downloaded, either. + "upload", "attach", or "disable". 
If the mode is "attach" the guest + additions ISO will be attached as a CD device to the virtual machine. + If the mode is "upload" the guest additions ISO will be uploaded to + the path specified by `guest_additions_path`. The default value is + "upload". If "disable" is used, guest additions won't be downloaded, + either. * `guest_additions_path` (string) - The path on the guest virtual machine where the VirtualBox guest additions ISO will be uploaded. By default this From c4cee75b9372a62bee0d7904e656253b488e99db Mon Sep 17 00:00:00 2001 From: Nathan Hartwell Date: Wed, 13 May 2015 08:32:09 -0500 Subject: [PATCH 063/956] Adding tests for disable sudo --- .../salt-masterless/provisioner_test.go | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/provisioner/salt-masterless/provisioner_test.go b/provisioner/salt-masterless/provisioner_test.go index c59ee9463..c15053b21 100644 --- a/provisioner/salt-masterless/provisioner_test.go +++ b/provisioner/salt-masterless/provisioner_test.go @@ -103,3 +103,28 @@ func TestProvisionerPrepare_LocalPillarRoots(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerSudo(t *testing.T) { + var p Provisioner + config := testConfig() + + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + withSudo := p.sudo("echo hello") + if withSudo != "sudo echo hello" { + t.Fatalf("sudo command not generated correctly") + } + + config["disable_sudo"] = true + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + withoutSudo := p.sudo("echo hello") + if withoutSudo != "echo hello" { + t.Fatalf("sudo-less command not generated correctly") + } +} From 8944824fa81dc46918a7897f9f02b0a19b4332c5 Mon Sep 17 00:00:00 2001 From: Romain Bossart Date: Thu, 14 May 2015 21:11:44 +0200 Subject: [PATCH 064/956] doc update: no need for homebrew/binary anymore --- website/source/docs/installation.html.markdown | 5 +---- website/source/intro/getting-started/setup.html.markdown | 4 +--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/website/source/docs/installation.html.markdown b/website/source/docs/installation.html.markdown index e98a7acae..b24078729 100644 --- a/website/source/docs/installation.html.markdown +++ b/website/source/docs/installation.html.markdown @@ -65,12 +65,9 @@ installation managed by the Packer community: ### Homebrew -If you're using OS X and [Homebrew](http://brew.sh), you can install Packer by -adding the `binary` tap. Remember that this is updated by a 3rd party, so -it may not be the latest available version. +If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: ```text -$ brew tap homebrew/binary $ brew install packer ``` diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index b60158815..ae14c2748 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -67,10 +67,8 @@ are alternatives available. ### Homebrew -If you're using OS X and [Homebrew](http://brew.sh), you can install Packer by -adding the `binary` tap: +If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: ```text -$ brew tap homebrew/binary $ brew install packer ``` From 849b825d187a62e0f9d72b7150751881a8464aa5 Mon Sep 17 00:00:00 2001 From: Asa Gage Date: Thu, 14 May 2015 17:22:32 -0400 Subject: [PATCH 065/956] Fixed missing comma in remote-builds example code. 
--- .../source/intro/getting-started/remote-builds.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index dd7afa911..d2f7d3627 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -61,7 +61,7 @@ Now we have Atlas building an AMI with Redis pre-configured. This is great, but "variables": ["..."], "builders": ["..."], "provisioners": ["..."], - "push": ["..."] + "push": ["..."], "post-processors": [ { "type": "atlas", @@ -72,4 +72,4 @@ Now we have Atlas building an AMI with Redis pre-configured. This is great, but } ``` -Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas. \ No newline at end of file +Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas. From c3a6e60e6c3a38564b5e262dbdb341928b0f673e Mon Sep 17 00:00:00 2001 From: Asa Gage Date: Thu, 14 May 2015 17:27:30 -0400 Subject: [PATCH 066/956] removed NL --- .../source/intro/getting-started/remote-builds.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index d2f7d3627..d63bf560b 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -72,4 +72,4 @@ Now we have Atlas building an AMI with Redis pre-configured. This is great, but } ``` -Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas. +Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas. \ No newline at end of file From 758618ecaa059d009716fd778795dd2484b86861 Mon Sep 17 00:00:00 2001 From: Francisco Lopez Date: Thu, 14 May 2015 15:05:44 -0700 Subject: [PATCH 067/956] Add ignore_exit_codes key for provisioner puppet-server --- provisioner/puppet-server/provisioner.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-server/provisioner.go b/provisioner/puppet-server/provisioner.go index de21e0105..e451670d7 100644 --- a/provisioner/puppet-server/provisioner.go +++ b/provisioner/puppet-server/provisioner.go @@ -38,6 +38,10 @@ type Config struct { // The directory where files will be uploaded. Packer requires write // permissions in this directory. 
StagingDir string `mapstructure:"staging_dir"` + + // If true, packer will ignore all exit-codes from a puppet run + IgnoreExitCodes bool `mapstructure:"ignore_exit_codes"` + } type Provisioner struct { @@ -200,7 +204,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return err } - if cmd.ExitStatus != 0 && cmd.ExitStatus != 2 { + if cmd.ExitStatus != 0 && cmd.ExitStatus != 2 && !p.config.IgnoreExitCodes { return fmt.Errorf("Puppet exited with a non-zero exit status: %d", cmd.ExitStatus) } From 527a9e9bfc80798a4d7207d084cc8c84c350701f Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Fri, 15 May 2015 16:19:35 -0400 Subject: [PATCH 068/956] Update middleman-hashicorp --- .gitignore | 3 +++ website/Gemfile | 6 +++--- website/Gemfile.lock | 32 ++++++++++++++++---------------- website/config.rb | 9 +++++---- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index a0e0b48d8..2965e7967 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,6 @@ .DS_Store .vagrant test/.env + +website/.bundle +website/vendor diff --git a/website/Gemfile b/website/Gemfile index 8fc173cfd..2b35e2810 100644 --- a/website/Gemfile +++ b/website/Gemfile @@ -1,5 +1,5 @@ -source 'https://rubygems.org' +source "https://rubygems.org" -ruby "2.0.0" +ruby "2.2.2" -gem 'middleman-hashicorp', github: 'hashicorp/middleman-hashicorp' +gem "middleman-hashicorp", github: "hashicorp/middleman-hashicorp" diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 6b8e19a04..7366999a6 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,6 +1,6 @@ GIT remote: git://github.com/hashicorp/middleman-hashicorp.git - revision: 783fe9517dd02badb85e5ddfeda4d8e35bbd05a8 + revision: 7796ba44d303ac8e1b566e855e2766e6d0f695fc specs: middleman-hashicorp (0.1.0) bootstrap-sass (~> 3.3) @@ -20,13 +20,13 @@ GIT GEM remote: https://rubygems.org/ specs: - activesupport (4.1.9) + activesupport (4.1.10) i18n (~> 0.6, >= 0.6.9) json (~> 1.7, >= 1.7.7) minitest (~> 5.1) thread_safe (~> 0.1) tzinfo (~> 1.1) - autoprefixer-rails (5.1.7.1) + autoprefixer-rails (5.1.11) execjs json bootstrap-sass (3.3.4.1) @@ -36,10 +36,10 @@ GEM celluloid (0.16.0) timers (~> 4.0.0) chunky_png (1.3.4) - coffee-script (2.3.0) + coffee-script (2.4.1) coffee-script-source execjs - coffee-script-source (1.9.1) + coffee-script-source (1.9.1.1) commonjs (0.2.7) compass (1.0.3) chunky_png (~> 1.2) @@ -59,7 +59,7 @@ GEM http_parser.rb (~> 0.6.0) erubis (2.7.0) eventmachine (1.0.7) - execjs (2.4.0) + execjs (2.5.2) ffi (1.9.8) haml (4.0.6) tilt @@ -71,26 +71,26 @@ GEM http_parser.rb (0.6.0) i18n (0.7.0) json (1.8.2) - kramdown (1.6.0) + kramdown (1.7.0) less (2.6.0) commonjs (~> 0.2.7) libv8 (3.16.14.7) - listen (2.9.0) - celluloid (>= 0.15.2) + listen (2.10.0) + celluloid (~> 0.16.0) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.10) + middleman (3.3.13) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.10) + middleman-core (= 3.3.13) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.10) + middleman-core (3.3.13) activesupport (~> 4.1.0) bundler (~> 1.1) erubis @@ -117,7 +117,7 @@ GEM middleman-syntax (2.0.0) middleman-core (~> 3.2) rouge (~> 1.0) - minitest (5.5.1) + minitest (5.6.1) multi_json (1.11.0) padrino-helpers (0.12.5) i18n (~> 0.6, >= 0.6.7) @@ -125,7 +125,7 @@ GEM tilt (~> 1.4.1) padrino-support (0.12.5) activesupport (>= 3.1) - 
rack (1.6.0) + rack (1.6.1) rack-contrib (1.2.0) rack (>= 0.9.1) rack-livereload (0.3.15) @@ -137,7 +137,7 @@ GEM rb-fsevent (0.9.4) rb-inotify (0.9.5) ffi (>= 0.5.0) - redcarpet (3.2.2) + redcarpet (3.2.3) ref (1.0.5) rouge (1.8.0) sass (3.4.13) @@ -151,7 +151,7 @@ GEM sprockets-sass (1.3.1) sprockets (~> 2.0) tilt (~> 1.1) - therubyracer (0.12.1) + therubyracer (0.12.2) libv8 (~> 3.16.14.0) ref thin (1.6.3) diff --git a/website/config.rb b/website/config.rb index 3e15896af..9c21ff297 100644 --- a/website/config.rb +++ b/website/config.rb @@ -5,8 +5,9 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| - h.version = ENV["PACKER_VERSION"] - h.bintray_repo = 'mitchellh/packer' - h.bintray_user = 'mitchellh' - h.bintray_key = ENV['BINTRAY_API_KEY'] + h.version = ENV["PACKER_VERSION"] + h.bintray_enabled = ENV["BINTRAY_ENABLED"] + h.bintray_repo = "mitchellh/packer" + h.bintray_user = "mitchellh" + h.bintray_key = ENV["BINTRAY_API_KEY"] end From ff6573ce10f96df2d51404d19591dadbb3574cdd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:05:47 -0700 Subject: [PATCH 069/956] template/interpolate: basic + some funcs --- template/interpolate/funcs.go | 46 +++++++++++++++++++++ template/interpolate/funcs_test.go | 66 ++++++++++++++++++++++++++++++ template/interpolate/i.go | 38 +++++++++++++++++ template/interpolate/i_test.go | 32 +++++++++++++++ template/interpolate/parse.go | 42 +++++++++++++++++++ template/interpolate/parse_test.go | 39 ++++++++++++++++++ 6 files changed, 263 insertions(+) create mode 100644 template/interpolate/funcs.go create mode 100644 template/interpolate/funcs_test.go create mode 100644 template/interpolate/i.go create mode 100644 template/interpolate/i_test.go create mode 100644 template/interpolate/parse.go create mode 100644 template/interpolate/parse_test.go diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go new file mode 100644 index 000000000..f3c17d8b7 --- /dev/null +++ b/template/interpolate/funcs.go @@ -0,0 +1,46 @@ +package interpolate + +import ( + "errors" + "os" + "text/template" +) + +// Funcs are the interpolation funcs that are available within interpolations. +var FuncGens = map[string]FuncGenerator{ + "env": funcGenEnv, + "user": funcGenUser, +} + +// FuncGenerator is a function that given a context generates a template +// function for the template. +type FuncGenerator func(*Context) interface{} + +// Funcs returns the functions that can be used for interpolation given +// a context. +func Funcs(ctx *Context) template.FuncMap { + result := make(map[string]interface{}) + for k, v := range FuncGens { + result[k] = v(ctx) + } + + return template.FuncMap(result) +} + +func funcGenEnv(ctx *Context) interface{} { + return func(k string) (string, error) { + if ctx.DisableEnv { + // The error message doesn't have to be that detailed since + // semantic checks should catch this. 
+ return "", errors.New("env vars are not allowed here") + } + + return os.Getenv(k), nil + } +} + +func funcGenUser(ctx *Context) interface{} { + return func() string { + return "" + } +} diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go new file mode 100644 index 000000000..2bc70b0bf --- /dev/null +++ b/template/interpolate/funcs_test.go @@ -0,0 +1,66 @@ +package interpolate + +import ( + "os" + "testing" +) + +func TestFuncEnv(t *testing.T) { + cases := []struct { + Input string + Output string + }{ + { + `{{env "PACKER_TEST_ENV"}}`, + `foo`, + }, + + { + `{{env "PACKER_TEST_ENV_NOPE"}}`, + ``, + }, + } + + os.Setenv("PACKER_TEST_ENV", "foo") + defer os.Setenv("PACKER_TEST_ENV", "") + + ctx := &Context{} + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} + +func TestFuncEnv_disable(t *testing.T) { + cases := []struct { + Input string + Output string + Error bool + }{ + { + `{{env "PACKER_TEST_ENV"}}`, + "", + true, + }, + } + + ctx := &Context{DisableEnv: true} + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if (err != nil) != tc.Error { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} diff --git a/template/interpolate/i.go b/template/interpolate/i.go new file mode 100644 index 000000000..68095a03f --- /dev/null +++ b/template/interpolate/i.go @@ -0,0 +1,38 @@ +package interpolate + +import ( + "bytes" + "text/template" +) + +// Context is the context that an interpolation is done in. This defines +// things such as available variables. +type Context struct { + DisableEnv bool +} + +// I stands for "interpolation" and is the main interpolation struct +// in order to render values. +type I struct { + Value string +} + +// Render renders the interpolation with the given context. 
+func (i *I) Render(ctx *Context) (string, error) { + tpl, err := i.template(ctx) + if err != nil { + return "", err + } + + var result bytes.Buffer + data := map[string]interface{}{} + if err := tpl.Execute(&result, data); err != nil { + return "", err + } + + return result.String(), nil +} + +func (i *I) template(ctx *Context) (*template.Template, error) { + return template.New("root").Funcs(Funcs(ctx)).Parse(i.Value) +} diff --git a/template/interpolate/i_test.go b/template/interpolate/i_test.go new file mode 100644 index 000000000..a678afbc4 --- /dev/null +++ b/template/interpolate/i_test.go @@ -0,0 +1,32 @@ +package interpolate + +import ( + "testing" +) + +func TestIRender(t *testing.T) { + cases := map[string]struct { + Ctx *Context + Value string + Result string + }{ + "basic": { + nil, + "foo", + "foo", + }, + } + + for k, tc := range cases { + i := &I{Value: tc.Value} + result, err := i.Render(tc.Ctx) + if err != nil { + t.Fatalf("%s\n\ninput: %s\n\nerr: %s", k, tc.Value, err) + } + if result != tc.Result { + t.Fatalf( + "%s\n\ninput: %s\n\nexpected: %s\n\ngot: %s", + k, tc.Value, tc.Result, result) + } + } +} diff --git a/template/interpolate/parse.go b/template/interpolate/parse.go new file mode 100644 index 000000000..b18079510 --- /dev/null +++ b/template/interpolate/parse.go @@ -0,0 +1,42 @@ +package interpolate + +import ( + "fmt" + "text/template" + "text/template/parse" +) + +// functionsCalled returns a map (to be used as a set) of the functions +// that are called from the given text template. +func functionsCalled(t *template.Template) map[string]struct{} { + result := make(map[string]struct{}) + functionsCalledWalk(t.Tree.Root, result) + return result +} + +func functionsCalledWalk(raw parse.Node, r map[string]struct{}) { + switch node := raw.(type) { + case *parse.ActionNode: + functionsCalledWalk(node.Pipe, r) + case *parse.CommandNode: + if in, ok := node.Args[0].(*parse.IdentifierNode); ok { + r[in.Ident] = struct{}{} + } + + for _, n := range node.Args[1:] { + functionsCalledWalk(n, r) + } + case *parse.ListNode: + for _, n := range node.Nodes { + functionsCalledWalk(n, r) + } + case *parse.PipeNode: + for _, n := range node.Cmds { + functionsCalledWalk(n, r) + } + case *parse.StringNode, *parse.TextNode: + // Ignore + default: + panic(fmt.Sprintf("unknown type: %T", node)) + } +} diff --git a/template/interpolate/parse_test.go b/template/interpolate/parse_test.go new file mode 100644 index 000000000..3398ddbf1 --- /dev/null +++ b/template/interpolate/parse_test.go @@ -0,0 +1,39 @@ +package interpolate + +import ( + "reflect" + "testing" + "text/template" +) + +func TestFunctionsCalled(t *testing.T) { + cases := []struct { + Input string + Result map[string]struct{} + }{ + { + "foo", + map[string]struct{}{}, + }, + + { + "foo {{user `bar`}}", + map[string]struct{}{ + "user": struct{}{}, + }, + }, + } + + funcs := Funcs(&Context{}) + for _, tc := range cases { + tpl, err := template.New("root").Funcs(funcs).Parse(tc.Input) + if err != nil { + t.Fatalf("err parsing: %v\n\n%s", tc.Input, err) + } + + actual := functionsCalled(tpl) + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("bad: %v\n\ngot: %#v", tc.Input, actual) + } + } +} From 125369d1026718e1ac6bda3adce1dcbcf33e965a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:08:46 -0700 Subject: [PATCH 070/956] template/interpolate: can specify template data --- template/interpolate/i.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git 
a/template/interpolate/i.go b/template/interpolate/i.go index 68095a03f..68c60d95c 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -8,6 +8,10 @@ import ( // Context is the context that an interpolation is done in. This defines // things such as available variables. type Context struct { + // Data is the data for the template that is available + Data interface{} + + // DisableEnv disables the env function DisableEnv bool } @@ -25,7 +29,10 @@ func (i *I) Render(ctx *Context) (string, error) { } var result bytes.Buffer - data := map[string]interface{}{} + var data interface{} + if ctx != nil { + data = ctx.Data + } if err := tpl.Execute(&result, data); err != nil { return "", err } From 5d205ec1fcabd860befe5ed11c04a6750d0323be Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:10:12 -0700 Subject: [PATCH 071/956] template/interpolate: wd --- template/interpolate/funcs.go | 7 +++++++ template/interpolate/funcs_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index f3c17d8b7..602702caf 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -9,6 +9,7 @@ import ( // Funcs are the interpolation funcs that are available within interpolations. var FuncGens = map[string]FuncGenerator{ "env": funcGenEnv, + "pwd": funcGenPwd, "user": funcGenUser, } @@ -39,6 +40,12 @@ func funcGenEnv(ctx *Context) interface{} { } } +func funcGenPwd(ctx *Context) interface{} { + return func() (string, error) { + return os.Getwd() + } +} + func funcGenUser(ctx *Context) interface{} { return func() string { return "" diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index 2bc70b0bf..7bd5c4647 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -64,3 +64,33 @@ func TestFuncEnv_disable(t *testing.T) { } } } + +func TestFuncPwd(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + + cases := []struct { + Input string + Output string + }{ + { + `{{pwd}}`, + wd, + }, + } + + ctx := &Context{} + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} From b84ec8da4b66d0b6513e6517043b8968e7e9e589 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:12:54 -0700 Subject: [PATCH 072/956] template/interpolate: isotime --- template/interpolate/funcs.go | 23 ++++++++++++++++++++--- template/interpolate/funcs_test.go | 20 ++++++++++++++++++++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index 602702caf..51fcd0911 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -2,15 +2,18 @@ package interpolate import ( "errors" + "fmt" "os" "text/template" + "time" ) // Funcs are the interpolation funcs that are available within interpolations. 
var FuncGens = map[string]FuncGenerator{ - "env": funcGenEnv, - "pwd": funcGenPwd, - "user": funcGenUser, + "env": funcGenEnv, + "isotime": funcGenIsotime, + "pwd": funcGenPwd, + "user": funcGenUser, } // FuncGenerator is a function that given a context generates a template @@ -40,6 +43,20 @@ func funcGenEnv(ctx *Context) interface{} { } } +func funcGenIsotime(ctx *Context) interface{} { + return func(format ...string) (string, error) { + if len(format) == 0 { + return time.Now().UTC().Format(time.RFC3339), nil + } + + if len(format) > 1 { + return "", fmt.Errorf("too many values, 1 needed: %v", format) + } + + return time.Now().UTC().Format(format[0]), nil + } +} + func funcGenPwd(ctx *Context) interface{} { return func() (string, error) { return os.Getwd() diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index 7bd5c4647..ef0753e4f 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -3,6 +3,7 @@ package interpolate import ( "os" "testing" + "time" ) func TestFuncEnv(t *testing.T) { @@ -65,6 +66,25 @@ func TestFuncEnv_disable(t *testing.T) { } } +func TestFuncIsotime(t *testing.T) { + ctx := &Context{} + i := &I{Value: "{{isotime}}"} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("err: %s", err) + } + + val, err := time.Parse(time.RFC3339, result) + if err != nil { + t.Fatalf("err: %s", err) + } + + currentTime := time.Now().UTC() + if currentTime.Sub(val) > 2*time.Second { + t.Fatalf("val: %d (current: %d)", val, currentTime) + } +} + func TestFuncPwd(t *testing.T) { wd, err := os.Getwd() if err != nil { From 7659a91445b1e63ea614dcb715d54dac008c307d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:14:41 -0700 Subject: [PATCH 073/956] template/interpolate: timestamp --- template/interpolate/funcs.go | 25 +++++++++++++++++++++---- template/interpolate/funcs_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index 51fcd0911..ead8eb95e 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -4,16 +4,27 @@ import ( "errors" "fmt" "os" + "strconv" "text/template" "time" ) +// InitTime is the UTC time when this package was initialized. It is +// used as the timestamp for all configuration templates so that they +// match for a single build. +var InitTime time.Time + +func init() { + InitTime = time.Now().UTC() +} + // Funcs are the interpolation funcs that are available within interpolations. 
var FuncGens = map[string]FuncGenerator{ - "env": funcGenEnv, - "isotime": funcGenIsotime, - "pwd": funcGenPwd, - "user": funcGenUser, + "env": funcGenEnv, + "isotime": funcGenIsotime, + "pwd": funcGenPwd, + "timestamp": funcGenTimestamp, + "user": funcGenUser, } // FuncGenerator is a function that given a context generates a template @@ -63,6 +74,12 @@ func funcGenPwd(ctx *Context) interface{} { } } +func funcGenTimestamp(ctx *Context) interface{} { + return func() string { + return strconv.FormatInt(InitTime.Unix(), 10) + } +} + func funcGenUser(ctx *Context) interface{} { return func() string { return "" diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index ef0753e4f..37dee1ad6 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -2,6 +2,7 @@ package interpolate import ( "os" + "strconv" "testing" "time" ) @@ -114,3 +115,30 @@ func TestFuncPwd(t *testing.T) { } } } + +func TestFuncTimestamp(t *testing.T) { + expected := strconv.FormatInt(InitTime.Unix(), 10) + + cases := []struct { + Input string + Output string + }{ + { + `{{timestamp}}`, + expected, + }, + } + + ctx := &Context{} + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} From a4b5e08fe48dc366dbf493326caf17476d8cab07 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:16:52 -0700 Subject: [PATCH 074/956] template/interpolate: upper/lower --- template/interpolate/funcs.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index ead8eb95e..8b081966c 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -5,8 +5,11 @@ import ( "fmt" "os" "strconv" + "strings" "text/template" "time" + + "github.com/mitchellh/packer/common/uuid" ) // InitTime is the UTC time when this package was initialized. 
It is @@ -24,7 +27,11 @@ var FuncGens = map[string]FuncGenerator{ "isotime": funcGenIsotime, "pwd": funcGenPwd, "timestamp": funcGenTimestamp, + "uuid": funcGenUuid, "user": funcGenUser, + + "upper": funcGenPrimitive(strings.ToUpper), + "lower": funcGenPrimitive(strings.ToLower), } // FuncGenerator is a function that given a context generates a template @@ -68,6 +75,12 @@ func funcGenIsotime(ctx *Context) interface{} { } } +func funcGenPrimitive(value interface{}) FuncGenerator { + return func(ctx *Context) interface{} { + return value + } +} + func funcGenPwd(ctx *Context) interface{} { return func() (string, error) { return os.Getwd() @@ -85,3 +98,9 @@ func funcGenUser(ctx *Context) interface{} { return "" } } + +func funcGenUuid(ctx *Context) interface{} { + return func() string { + return uuid.TimeOrderedUUID() + } +} From 1e745d950885a1aafa0a41b18c578e76722fe12d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 15 May 2015 21:18:27 -0700 Subject: [PATCH 075/956] template/interpolate: user variables --- template/interpolate/funcs.go | 8 +++++-- template/interpolate/funcs_test.go | 34 ++++++++++++++++++++++++++++++ template/interpolate/i.go | 4 ++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index 8b081966c..68592e046 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -94,8 +94,12 @@ func funcGenTimestamp(ctx *Context) interface{} { } func funcGenUser(ctx *Context) interface{} { - return func() string { - return "" + return func(k string) string { + if ctx == nil || ctx.UserVariables == nil { + return "" + } + + return ctx.UserVariables[k] } } diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index 37dee1ad6..7afa53447 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -142,3 +142,37 @@ func TestFuncTimestamp(t *testing.T) { } } } + +func TestFuncUser(t *testing.T) { + cases := []struct { + Input string + Output string + }{ + { + `{{user "foo"}}`, + `foo`, + }, + + { + `{{user "what"}}`, + ``, + }, + } + + ctx := &Context{ + UserVariables: map[string]string{ + "foo": "foo", + }, + } + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} diff --git a/template/interpolate/i.go b/template/interpolate/i.go index 68c60d95c..1033ad86a 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -11,6 +11,10 @@ type Context struct { // Data is the data for the template that is available Data interface{} + // UserVariables is the mapping of user variables that the + // "user" function reads from. + UserVariables map[string]string + // DisableEnv disables the env function DisableEnv bool } From 3984f5e6f6e083a25562be3db7b77dbc63c07b5a Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Sun, 17 May 2015 20:48:58 +0300 Subject: [PATCH 076/956] add discard option to qemu builder Enabling discards for a disk can greatly minimize disk size when the user inside the VM uses the fstrim command to trim/discard unneeded blocks.
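Stepping back to the interpolation patches above for a moment: the sketch below wires standalone equivalents of upper, lower, and the keyed user lookup into text/template, which is what the interpolate package ultimately feeds. The userVars map stands in for Context.UserVariables; everything else is illustrative.

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// userVars stands in for Context.UserVariables above.
	userVars := map[string]string{"foo": "foo"}

	funcs := template.FuncMap{
		// upper/lower need no context, so plain functions can be
		// registered directly; all funcGenPrimitive does is hand
		// such a value through unchanged.
		"upper": strings.ToUpper,
		"lower": strings.ToLower,
		// user closes over the map and yields "" for unknown keys,
		// matching the funcGenUser change above.
		"user": func(k string) string { return userVars[k] },
	}

	t := template.Must(template.New("t").Funcs(funcs).Parse(
		"{{user \"foo\" | upper}} {{lower \"ABC\"}}{{user \"what\"}}\n"))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: FOO abc
}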
Signed-off-by: Vasiliy Tolstov --- builder/qemu/builder.go | 15 +++++++++++++++ builder/qemu/step_run.go | 2 +- website/source/docs/builders/qemu.html.markdown | 3 +++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index e3bc6e4a1..9fce4ce15 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -64,6 +64,11 @@ var diskCache = map[string]bool{ "directsync": true, } +var diskDiscard = map[string]bool{ + "unmap": true, + "ignore": true, +} + type Builder struct { config config runner multistep.Runner @@ -77,6 +82,7 @@ type config struct { DiskInterface string `mapstructure:"disk_interface"` DiskSize uint `mapstructure:"disk_size"` DiskCache string `mapstructure:"disk_cache"` + DiskDiscard string `mapstructure:"disk_discard"` FloppyFiles []string `mapstructure:"floppy_files"` Format string `mapstructure:"format"` Headless bool `mapstructure:"headless"` @@ -141,6 +147,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.DiskCache = "writeback" } + if b.config.DiskDiscard == "" { + b.config.DiskDiscard = "ignore" + } + if b.config.Accelerator == "" { b.config.Accelerator = "kvm" } @@ -300,6 +310,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, errors.New("unrecognized disk cache type")) } + if _, ok := diskDiscard[b.config.DiskDiscard]; !ok { + errs = packer.MultiErrorAppend( + errs, errors.New("unrecognized disk discard type")) + } + if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 3f900d651..39dbc521d 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -81,7 +81,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error defaultArgs["-machine"] = fmt.Sprintf("type=%s", config.MachineType) defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:22", sshHostPort) defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) - defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache) + defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) if !config.DiskImage { defaultArgs["-cdrom"] = isoPath } diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 5aa0cf152..0e22ccc3b 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -115,6 +115,9 @@ each category, the available options are alphabetized and described. values include any of "writethrough", "writeback", "none", "unsafe" or "directsync". +* `disk_discard` (string) - The discard mode to use for the disk. Allowed values + include any of "unmap" or "ignore". + * `disk_image` (boolean) - Packer defaults to building from an ISO file, this parameter controls whether the ISO URL supplied is actually a bootable QEMU image. When this value is set to true, the machine will clone the From 76c8cfd498e35d9569c6e69403228da9248ed7ff Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 18 May 2015 15:13:01 -0700 Subject: [PATCH 077/956] common: don't scrub "" If the access_key or secret_key were loaded from somewhere other than the packer file, then ScrubConfig can get called to scrub "" and "".
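(Aside: the validation for the new disk_discard option follows the same allowed-values-map pattern as disk_cache. A minimal standalone sketch of that pattern, with the helper name validateDiscard being ours:)

package main

import "fmt"

// diskDiscard mirrors the allowed-values map added to the builder.
var diskDiscard = map[string]bool{
	"unmap":  true,
	"ignore": true,
}

func validateDiscard(v string) error {
	if v == "" {
		v = "ignore" // the default applied in Prepare above
	}
	if !diskDiscard[v] {
		return fmt.Errorf("unrecognized disk discard type: %s", v)
	}
	return nil
}

func main() {
	fmt.Println(validateDiscard("unmap")) // <nil>
	fmt.Println(validateDiscard("bogus")) // unrecognized disk discard type: bogus
}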
This results in very long output: <Fi... Don't do that. --- common/config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/common/config.go b/common/config.go index 72b3bdd27..d821faf35 100644 --- a/common/config.go +++ b/common/config.go @@ -18,6 +18,9 @@ import ( func ScrubConfig(target interface{}, values ...string) string { conf := fmt.Sprintf("Config: %+v", target) for _, value := range values { + if value == "" { + continue + } conf = strings.Replace(conf, value, "", -1) } return conf From 2fe785ed350f65cef85edc68035e169e8ddad52d Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Thu, 16 Apr 2015 15:08:28 -0700 Subject: [PATCH 078/956] common: remove dead code The referenced bug was fixed in Go 1.2, and Packer requires Go 1.2+. --- common/config.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/common/config.go b/common/config.go index d821faf35..db001056b 100644 --- a/common/config.go +++ b/common/config.go @@ -161,14 +161,6 @@ func DownloadableURL(original string) (string, error) { // Make sure it is lowercased url.Scheme = strings.ToLower(url.Scheme) - // This is to work around issue #5927. This can safely be removed once - // we distribute with a version of Go that fixes that bug. - // - // See: https://code.google.com/p/go/issues/detail?id=5927 - if url.Path != "" && url.Path[0] != '/' { - url.Path = "/" + url.Path - } - // Verify that the scheme is something we support in our common downloader. supported := []string{"file", "http", "https"} found := false From 95890003b751e1a976f7cb2d87284dc3a0b18515 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 19 May 2015 15:25:56 -0600 Subject: [PATCH 079/956] template: builder parsing --- template/parse.go | 123 ++++++++++++++++++ template/parse_test.go | 55 ++++++++ template/template.go | 77 +++++++++++ template/template_test.go | 12 ++ template/test-fixtures/parse-basic.json | 3 + .../test-fixtures/parse-builder-no-type.json | 3 + .../test-fixtures/parse-builder-repeat.json | 6 + 7 files changed, 279 insertions(+) create mode 100644 template/parse.go create mode 100644 template/parse_test.go create mode 100644 template/template.go create mode 100644 template/template_test.go create mode 100644 template/test-fixtures/parse-basic.json create mode 100644 template/test-fixtures/parse-builder-no-type.json create mode 100644 template/test-fixtures/parse-builder-repeat.json diff --git a/template/parse.go b/template/parse.go new file mode 100644 index 000000000..3cb9e288e --- /dev/null +++ b/template/parse.go @@ -0,0 +1,123 @@ +package template + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/mapstructure" +) + +// rawTemplate is the direct JSON document format of the template file. +// This is what is decoded directly from the file, and then it is turned +// into a Template object thereafter. +type rawTemplate struct { + MinVersion string `mapstructure:"min_packer_version"` + Description string + + Builders []map[string]interface{} + Push map[string]interface{} + PostProcesors []interface{} `mapstructure:"post-processors"` + Provisioners []map[string]interface{} + Variables map[string]interface{} +} + +// Template returns the actual Template object built from this raw +// structure. 
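Why scrubbing an empty value is so harmful is easy to demonstrate with strings.Replace directly; the "<Filtered>" placeholder below is illustrative, matching the truncated output in the commit message above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// An empty old string matches at every character boundary, so the
	// placeholder lands between every rune of the output -- hence the
	// guard above that skips empty values.
	fmt.Println(strings.Replace("key", "", "<Filtered>", -1))
	// Output: <Filtered>k<Filtered>e<Filtered>y<Filtered>
}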
+func (r *rawTemplate) Template() (*Template, error) { + var result Template + var errs error + + // Let's start by gathering all the builders + result.Builders = make(map[string]*Builder) + for i, rawB := range r.Builders { + var b Builder + if err := mapstructure.WeakDecode(rawB, &b); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: %s", i+1, err)) + continue + } + + // Set the raw configuration and delete any special keys + b.Config = rawB + delete(b.Config, "name") + delete(b.Config, "type") + if len(b.Config) == 0 { + b.Config = nil + } + + // If there is no type set, it is an error + if b.Type == "" { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: missing 'type'", i+1)) + continue + } + + // The name defaults to the type if it isn't set + if b.Name == "" { + b.Name = b.Type + } + + // If this builder already exists, it is an error + if _, ok := result.Builders[b.Name]; ok { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: builder with name '%s' already exists", + i+1, b.Name)) + continue + } + + // Append the builders + result.Builders[b.Name] = &b + } + + // If we have errors, return those with a nil result + if errs != nil { + return nil, errs + } + + return &result, nil +} + +// Parse takes the given io.Reader and parses a Template object out of it. +func Parse(r io.Reader) (*Template, error) { + // First, decode the object into an interface{}. We do this instead of + // the rawTemplate directly because we'd rather use mapstructure to + // decode since it has richer errors. + var raw interface{} + if err := json.NewDecoder(r).Decode(&raw); err != nil { + return nil, err + } + + // Create our decoder + var md mapstructure.Metadata + var rawTpl rawTemplate + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Metadata: &md, + Result: &rawTpl, + }) + if err != nil { + return nil, err + } + + // Do the actual decode into our structure + if err := decoder.Decode(raw); err != nil { + return nil, err + } + + // Build an error if there are unused root level keys + if len(md.Unused) > 0 { + sort.Strings(md.Unused) + for _, unused := range md.Unused { + err = multierror.Append(err, fmt.Errorf( + "Unknown root level key in template: '%s'", unused)) + } + + // Return early for these errors + return nil, err + } + + // Return the template parsed from the raw structure + return rawTpl.Template() +} diff --git a/template/parse_test.go b/template/parse_test.go new file mode 100644 index 000000000..b7789298c --- /dev/null +++ b/template/parse_test.go @@ -0,0 +1,55 @@ +package template + +import ( + "os" + "reflect" + "testing" +) + +func TestParse(t *testing.T) { + cases := []struct { + File string + Result *Template + Err bool + }{ + { + "parse-basic.json", + &Template{ + Builders: map[string]*Builder{ + "something": &Builder{ + Name: "something", + Type: "something", + }, + }, + }, + false, + }, + { + "parse-builder-no-type.json", + nil, + true, + }, + { + "parse-builder-repeat.json", + nil, + true, + }, + } + + for _, tc := range cases { + f, err := os.Open(fixtureDir(tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } + + tpl, err := Parse(f) + f.Close() + if (err != nil) != tc.Err { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(tpl, tc.Result) { + t.Fatalf("bad: %#v", tpl) + } + } +} diff --git a/template/template.go b/template/template.go new file mode 100644 index 000000000..477a6d824 --- /dev/null +++ b/template/template.go @@ -0,0 +1,77 @@ +package template + +import ( + "fmt" + "time" +) + +// Template 
represents the parsed template that is used to configure +// Packer builds. +type Template struct { + Description string + MinVersion string + + Variables map[string]*Variable + Builders map[string]*Builder + Provisioners []*Provisioner + PostProcessors [][]*PostProcessor + Push *Push +} + +// Builder represents a builder configured in the template +type Builder struct { + Name string + Type string + Config map[string]interface{} +} + +// PostProcessor represents a post-processor within the template. +type PostProcessor struct { + OnlyExcept + + Type string + KeepInputArtifact bool + Config map[string]interface{} +} + +// Provisioner represents a provisioner within the template. +type Provisioner struct { + OnlyExcept + + Type string + Config map[string]interface{} + Override map[string]interface{} + PauseBefore time.Duration +} + +// Push represents the configuration for pushing the template to Atlas. +type Push struct { + Name string + Address string + BaseDir string `mapstructure:"base_dir"` + Include []string + Exclude []string + Token string + VCS bool +} + +// Variable represents a variable within the template +type Variable struct { + Default string + Required bool +} + +// OnlyExcept is a struct that is meant to be embedded that contains the +// logic required for "only" and "except" meta-parameters. +type OnlyExcept struct { + Only []string + Except []string +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (b *Builder) GoString() string { + return fmt.Sprintf("*%#v", *b) +} diff --git a/template/template_test.go b/template/template_test.go new file mode 100644 index 000000000..2847bf9a2 --- /dev/null +++ b/template/template_test.go @@ -0,0 +1,12 @@ +package template + +import ( + "path/filepath" +) + +const FixturesDir = "./test-fixtures" + +// fixtureDir returns the path to a test fixtures directory +func fixtureDir(n string) string { + return filepath.Join(FixturesDir, n) +} diff --git a/template/test-fixtures/parse-basic.json b/template/test-fixtures/parse-basic.json new file mode 100644 index 000000000..43b7a7898 --- /dev/null +++ b/template/test-fixtures/parse-basic.json @@ -0,0 +1,3 @@ +{ + "builders": [{"type": "something"}] +} diff --git a/template/test-fixtures/parse-builder-no-type.json b/template/test-fixtures/parse-builder-no-type.json new file mode 100644 index 000000000..1729d0827 --- /dev/null +++ b/template/test-fixtures/parse-builder-no-type.json @@ -0,0 +1,3 @@ +{ + "builders": [{"foo": "something"}] +} diff --git a/template/test-fixtures/parse-builder-repeat.json b/template/test-fixtures/parse-builder-repeat.json new file mode 100644 index 000000000..258b75883 --- /dev/null +++ b/template/test-fixtures/parse-builder-repeat.json @@ -0,0 +1,6 @@ +{ + "builders": [ + {"type": "something"}, + {"type": "something"} + ] +} From 4583ed610809d7ea0f1373b93af3ed9dc1b676bc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 13:34:44 -0600 Subject: [PATCH 080/956] template: parse provisioners --- template/parse.go | 54 +++++++++++- template/parse_test.go | 85 ++++++++++++++++++- template/template.go | 8 +- .../parse-provisioner-basic.json | 5 ++ .../parse-provisioner-except.json | 8 ++ .../parse-provisioner-no-type.json | 5 ++ .../test-fixtures/parse-provisioner-only.json | 8 ++ .../parse-provisioner-override.json | 10 +++ .../parse-provisioner-pause-before.json | 8 ++ 9 files changed, 187 insertions(+), 4 deletions(-) create mode 
100644 template/test-fixtures/parse-provisioner-basic.json create mode 100644 template/test-fixtures/parse-provisioner-except.json create mode 100644 template/test-fixtures/parse-provisioner-no-type.json create mode 100644 template/test-fixtures/parse-provisioner-only.json create mode 100644 template/test-fixtures/parse-provisioner-override.json create mode 100644 template/test-fixtures/parse-provisioner-pause-before.json diff --git a/template/parse.go b/template/parse.go index 3cb9e288e..a96117111 100644 --- a/template/parse.go +++ b/template/parse.go @@ -31,7 +31,9 @@ func (r *rawTemplate) Template() (*Template, error) { var errs error // Let's start by gathering all the builders - result.Builders = make(map[string]*Builder) + if len(r.Builders) > 0 { + result.Builders = make(map[string]*Builder, len(r.Builders)) + } for i, rawB := range r.Builders { var b Builder if err := mapstructure.WeakDecode(rawB, &b); err != nil { @@ -72,6 +74,39 @@ func (r *rawTemplate) Template() (*Template, error) { result.Builders[b.Name] = &b } + // Gather all the provisioners + if len(r.Provisioners) > 0 { + result.Provisioners = make([]*Provisioner, 0, len(r.Provisioners)) + } + for i, v := range r.Provisioners { + var p Provisioner + if err := r.decoder(&p, nil).Decode(v); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "provisioner %d: %s", i+1, err)) + continue + } + + // Type is required before any richer validation + if p.Type == "" { + errs = multierror.Append(errs, fmt.Errorf( + "provisioner %d: missing 'type'", i+1)) + continue + } + + // Copy the configuration + delete(v, "except") + delete(v, "only") + delete(v, "override") + delete(v, "pause_before") + delete(v, "type") + if len(v) > 0 { + p.Config = v + } + + // TODO: stuff + result.Provisioners = append(result.Provisioners, &p) + } + // If we have errors, return those with a nil result if errs != nil { return nil, errs @@ -80,6 +115,23 @@ func (r *rawTemplate) Template() (*Template, error) { return &result, nil } +func (r *rawTemplate) decoder( + result interface{}, + md *mapstructure.Metadata) *mapstructure.Decoder { + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Metadata: md, + Result: result, + }) + if err != nil { + // This really shouldn't happen since we have firm control over + // all the arguments and they're all unit tested. So we use a + // panic here to note this would definitely be a bug. + panic(err) + } + return d +} + // Parse takes the given io.Reader and parses a Template object out of it. func Parse(r io.Reader) (*Template, error) { // First, decode the object into an interface{}. 
We do this instead of diff --git a/template/parse_test.go b/template/parse_test.go index b7789298c..2c5b8f735 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -4,6 +4,7 @@ import ( "os" "reflect" "testing" + "time" ) func TestParse(t *testing.T) { @@ -12,6 +13,9 @@ func TestParse(t *testing.T) { Result *Template Err bool }{ + /* + * Builders + */ { "parse-basic.json", &Template{ @@ -34,6 +38,85 @@ func TestParse(t *testing.T) { nil, true, }, + + /* + * Provisioners + */ + { + "parse-provisioner-basic.json", + &Template{ + Provisioners: []*Provisioner{ + &Provisioner{ + Type: "something", + }, + }, + }, + false, + }, + + { + "parse-provisioner-pause-before.json", + &Template{ + Provisioners: []*Provisioner{ + &Provisioner{ + Type: "something", + PauseBefore: 1 * time.Second, + }, + }, + }, + false, + }, + + { + "parse-provisioner-only.json", + &Template{ + Provisioners: []*Provisioner{ + &Provisioner{ + Type: "something", + OnlyExcept: OnlyExcept{ + Only: []string{"foo"}, + }, + }, + }, + }, + false, + }, + + { + "parse-provisioner-except.json", + &Template{ + Provisioners: []*Provisioner{ + &Provisioner{ + Type: "something", + OnlyExcept: OnlyExcept{ + Except: []string{"foo"}, + }, + }, + }, + }, + false, + }, + + { + "parse-provisioner-override.json", + &Template{ + Provisioners: []*Provisioner{ + &Provisioner{ + Type: "something", + Override: map[string]interface{}{ + "foo": map[string]interface{}{}, + }, + }, + }, + }, + false, + }, + + { + "parse-provisioner-no-type.json", + nil, + true, + }, } for _, tc := range cases { @@ -49,7 +132,7 @@ func TestParse(t *testing.T) { } if !reflect.DeepEqual(tpl, tc.Result) { - t.Fatalf("bad: %#v", tpl) + t.Fatalf("bad: %s\n\n%#v\n\n%#v", tc.File, tpl, tc.Result) } } } diff --git a/template/template.go b/template/template.go index 477a6d824..daee508fc 100644 --- a/template/template.go +++ b/template/template.go @@ -36,12 +36,12 @@ type PostProcessor struct { // Provisioner represents a provisioner within the template. type Provisioner struct { - OnlyExcept + OnlyExcept `mapstructure:",squash"` Type string Config map[string]interface{} Override map[string]interface{} - PauseBefore time.Duration + PauseBefore time.Duration `mapstructure:"pause_before"` } // Push represents the configuration for pushing the template to Atlas. 
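Two details carry the provisioner decoding above: the ",squash" tag, which lifts the embedded only/except fields to the top level, and the string-to-duration hook, which turns "1s" into a time.Duration. A self-contained sketch of both follows; the struct names mirror the template package but are local to the sketch.

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type OnlyExcept struct {
	Only   []string
	Except []string
}

type Provisioner struct {
	OnlyExcept  `mapstructure:",squash"` // only/except decode at the top level
	Type        string
	PauseBefore time.Duration `mapstructure:"pause_before"`
}

func main() {
	raw := map[string]interface{}{
		"type":         "shell",
		"only":         []string{"foo"},
		"pause_before": "1s", // string -> time.Duration via the hook
	}

	var p Provisioner
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
		Result:     &p,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(raw); err != nil {
		panic(err)
	}
	fmt.Printf("%s %v %s\n", p.Type, p.Only, p.PauseBefore) // shell [foo] 1s
}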
@@ -75,3 +75,7 @@ type OnlyExcept struct { func (b *Builder) GoString() string { return fmt.Sprintf("*%#v", *b) } + +func (p *Provisioner) GoString() string { + return fmt.Sprintf("*%#v", *p) +} diff --git a/template/test-fixtures/parse-provisioner-basic.json b/template/test-fixtures/parse-provisioner-basic.json new file mode 100644 index 000000000..bf0d8d910 --- /dev/null +++ b/template/test-fixtures/parse-provisioner-basic.json @@ -0,0 +1,5 @@ +{ + "provisioners": [ + {"type": "something"} + ] +} diff --git a/template/test-fixtures/parse-provisioner-except.json b/template/test-fixtures/parse-provisioner-except.json new file mode 100644 index 000000000..8c7f0c8f5 --- /dev/null +++ b/template/test-fixtures/parse-provisioner-except.json @@ -0,0 +1,8 @@ +{ + "provisioners": [ + { + "type": "something", + "except": ["foo"] + } + ] +} diff --git a/template/test-fixtures/parse-provisioner-no-type.json b/template/test-fixtures/parse-provisioner-no-type.json new file mode 100644 index 000000000..40bc214d2 --- /dev/null +++ b/template/test-fixtures/parse-provisioner-no-type.json @@ -0,0 +1,5 @@ +{ + "provisioners": [ + {"foo": "something"} + ] +} diff --git a/template/test-fixtures/parse-provisioner-only.json b/template/test-fixtures/parse-provisioner-only.json new file mode 100644 index 000000000..3bbb534b2 --- /dev/null +++ b/template/test-fixtures/parse-provisioner-only.json @@ -0,0 +1,8 @@ +{ + "provisioners": [ + { + "type": "something", + "only": ["foo"] + } + ] +} diff --git a/template/test-fixtures/parse-provisioner-override.json b/template/test-fixtures/parse-provisioner-override.json new file mode 100644 index 000000000..5b55099ba --- /dev/null +++ b/template/test-fixtures/parse-provisioner-override.json @@ -0,0 +1,10 @@ +{ + "provisioners": [ + { + "type": "something", + "override": { + "foo": {} + } + } + ] +} diff --git a/template/test-fixtures/parse-provisioner-pause-before.json b/template/test-fixtures/parse-provisioner-pause-before.json new file mode 100644 index 000000000..70640847b --- /dev/null +++ b/template/test-fixtures/parse-provisioner-pause-before.json @@ -0,0 +1,8 @@ +{ + "provisioners": [ + { + "type": "something", + "pause_before": "1s" + } + ] +} From fbda5b119a68d091431ed06098b1fc83caaebd16 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 13:40:33 -0600 Subject: [PATCH 081/956] template: variable parsing --- template/parse.go | 20 ++++++++++++++++ template/parse_test.go | 24 +++++++++++++++++++ .../test-fixtures/parse-variable-default.json | 5 ++++ .../parse-variable-required.json | 5 ++++ 4 files changed, 54 insertions(+) create mode 100644 template/test-fixtures/parse-variable-default.json create mode 100644 template/test-fixtures/parse-variable-required.json diff --git a/template/parse.go b/template/parse.go index a96117111..47d97effa 100644 --- a/template/parse.go +++ b/template/parse.go @@ -30,6 +30,26 @@ func (r *rawTemplate) Template() (*Template, error) { var result Template var errs error + // Gather the variables + if len(r.Variables) > 0 { + result.Variables = make(map[string]*Variable, len(r.Variables)) + } + for k, rawV := range r.Variables { + var v Variable + + // Variable is required if the value is exactly nil + v.Required = rawV == nil + + // Weak decode the default if we have one + if err := r.decoder(&v.Default, nil).Decode(rawV); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "variable %s: %s", k, err)) + continue + } + + result.Variables[k] = &v + } + // Let's start by gathering all the builders if 
len(r.Builders) > 0 { result.Builders = make(map[string]*Builder, len(r.Builders)) diff --git a/template/parse_test.go b/template/parse_test.go index 2c5b8f735..fcf7fcf29 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -117,6 +117,30 @@ func TestParse(t *testing.T) { nil, true, }, + + { + "parse-variable-default.json", + &Template{ + Variables: map[string]*Variable{ + "foo": &Variable{ + Default: "foo", + }, + }, + }, + false, + }, + + { + "parse-variable-required.json", + &Template{ + Variables: map[string]*Variable{ + "foo": &Variable{ + Required: true, + }, + }, + }, + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/parse-variable-default.json b/template/test-fixtures/parse-variable-default.json new file mode 100644 index 000000000..05192b64d --- /dev/null +++ b/template/test-fixtures/parse-variable-default.json @@ -0,0 +1,5 @@ +{ + "variables": { + "foo": "foo" + } +} diff --git a/template/test-fixtures/parse-variable-required.json b/template/test-fixtures/parse-variable-required.json new file mode 100644 index 000000000..ca6458aaa --- /dev/null +++ b/template/test-fixtures/parse-variable-required.json @@ -0,0 +1,5 @@ +{ + "variables": { + "foo": null + } +} From 839784b044cf683e6d4dfb12b36e770c06d61d71 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 14:32:22 -0600 Subject: [PATCH 082/956] template: parse post-processors --- template/parse.go | 92 ++++++++++++++++- template/parse_test.go | 102 +++++++++++++++++++ template/template.go | 12 ++- template/test-fixtures/parse-pp-basic.json | 6 ++ template/test-fixtures/parse-pp-keep.json | 6 ++ template/test-fixtures/parse-pp-map.json | 5 + template/test-fixtures/parse-pp-multi.json | 5 + template/test-fixtures/parse-pp-no-type.json | 5 + template/test-fixtures/parse-pp-slice.json | 5 + template/test-fixtures/parse-pp-string.json | 3 + 10 files changed, 234 insertions(+), 7 deletions(-) create mode 100644 template/test-fixtures/parse-pp-basic.json create mode 100644 template/test-fixtures/parse-pp-keep.json create mode 100644 template/test-fixtures/parse-pp-map.json create mode 100644 template/test-fixtures/parse-pp-multi.json create mode 100644 template/test-fixtures/parse-pp-no-type.json create mode 100644 template/test-fixtures/parse-pp-slice.json create mode 100644 template/test-fixtures/parse-pp-string.json diff --git a/template/parse.go b/template/parse.go index 47d97effa..f9dc7cba3 100644 --- a/template/parse.go +++ b/template/parse.go @@ -17,11 +17,11 @@ type rawTemplate struct { MinVersion string `mapstructure:"min_packer_version"` Description string - Builders []map[string]interface{} - Push map[string]interface{} - PostProcesors []interface{} `mapstructure:"post-processors"` - Provisioners []map[string]interface{} - Variables map[string]interface{} + Builders []map[string]interface{} + Push map[string]interface{} + PostProcessors []interface{} `mapstructure:"post-processors"` + Provisioners []map[string]interface{} + Variables map[string]interface{} } // Template returns the actual Template object built from this raw @@ -94,6 +94,49 @@ func (r *rawTemplate) Template() (*Template, error) { result.Builders[b.Name] = &b } + // Gather all the post-processors + if len(r.PostProcessors) > 0 { + result.PostProcessors = make([][]*PostProcessor, 0, len(r.PostProcessors)) + } + for i, v := range r.PostProcessors { + // Parse the configurations. We need to do this because post-processors + // can take three different formats. 
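Concretely, the three formats are a bare string, a single map, and a sequence whose elements are themselves strings or maps. A standalone sketch of the same normalization performed by parsePostProcessor just below:

package main

import (
	"encoding/json"
	"fmt"
)

// normalize flattens every accepted shape into a slice of config maps,
// rejecting nested sequences like the real parser does.
func normalize(raw interface{}) ([]map[string]interface{}, error) {
	switch v := raw.(type) {
	case string:
		return []map[string]interface{}{{"type": v}}, nil
	case map[string]interface{}:
		return []map[string]interface{}{v}, nil
	case []interface{}:
		result := make([]map[string]interface{}, len(v))
		for j, inner := range v {
			switch iv := inner.(type) {
			case string:
				result[j] = map[string]interface{}{"type": iv}
			case map[string]interface{}:
				result[j] = iv
			default:
				return nil, fmt.Errorf("element %d: sequence not allowed to be nested", j+1)
			}
		}
		return result, nil
	default:
		return nil, fmt.Errorf("unknown format: %T", raw)
	}
}

func main() {
	for _, doc := range []string{`"foo"`, `{"type": "foo"}`, `[{"type": "foo"}, "bar"]`} {
		var raw interface{}
		if err := json.Unmarshal([]byte(doc), &raw); err != nil {
			panic(err)
		}
		configs, err := normalize(raw)
		if err != nil {
			panic(err)
		}
		fmt.Println(configs)
	}
}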
+ configs, err := r.parsePostProcessor(i, v) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + // Parse the PostProcessors out of the configs + pps := make([]*PostProcessor, 0, len(configs)) + for j, c := range configs { + var pp PostProcessor + if err := r.decoder(&pp, nil).Decode(c); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "post-processor %d.%d: %s", i+1, j+1, err)) + continue + } + + // Type is required + if pp.Type == "" { + errs = multierror.Append(errs, fmt.Errorf( + "post-processor %d.%d: type is required", i+1, j+1)) + continue + } + + // Set the configuration + delete(c, "keep_input_artifact") + delete(c, "type") + if len(c) > 0 { + pp.Config = c + } + + pps = append(pps, &pp) + } + + result.PostProcessors = append(result.PostProcessors, pps) + } + // Gather all the provisioners if len(r.Provisioners) > 0 { result.Provisioners = make([]*Provisioner, 0, len(r.Provisioners)) @@ -152,6 +195,45 @@ func (r *rawTemplate) decoder( return d } +func (r *rawTemplate) parsePostProcessor( + i int, raw interface{}) ([]map[string]interface{}, error) { + switch v := raw.(type) { + case string: + return []map[string]interface{}{ + {"type": v}, + }, nil + case map[string]interface{}: + return []map[string]interface{}{v}, nil + case []interface{}: + var err error + result := make([]map[string]interface{}, len(v)) + for j, innerRaw := range v { + switch innerV := innerRaw.(type) { + case string: + result[j] = map[string]interface{}{"type": innerV} + case map[string]interface{}: + result[j] = innerV + case []interface{}: + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: sequence not allowed to be nested in a sequence", + i+1, j+1)) + default: + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: unknown format", + i+1, j+1)) + } + } + + if err != nil { + return nil, err + } + + return result, nil + default: + return nil, fmt.Errorf("post-processor %d: bad format", i+1) + } +} + // Parse takes the given io.Reader and parses a Template object out of it. func Parse(r io.Reader) (*Template, error) { // First, decode the object into an interface{}. 
We do this instead of diff --git a/template/parse_test.go b/template/parse_test.go index fcf7fcf29..b28670e40 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -141,6 +141,108 @@ func TestParse(t *testing.T) { }, false, }, + + { + "parse-pp-basic.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + }, + false, + }, + + { + "parse-pp-keep.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + KeepInputArtifact: true, + }, + }, + }, + }, + false, + }, + + { + "parse-pp-string.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + }, + }, + }, + }, + false, + }, + + { + "parse-pp-map.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + }, + }, + }, + }, + false, + }, + + { + "parse-pp-slice.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + }, + }, + []*PostProcessor{ + &PostProcessor{ + Type: "bar", + }, + }, + }, + }, + false, + }, + + { + "parse-pp-multi.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + }, + &PostProcessor{ + Type: "bar", + }, + }, + }, + }, + false, + }, + + { + "parse-pp-no-type.json", + nil, + true, + }, } for _, tc := range cases { diff --git a/template/template.go b/template/template.go index daee508fc..8de30b1a9 100644 --- a/template/template.go +++ b/template/template.go @@ -27,10 +27,10 @@ type Builder struct { // PostProcessor represents a post-processor within the template. type PostProcessor struct { - OnlyExcept + OnlyExcept `mapstructure:",squash"` Type string - KeepInputArtifact bool + KeepInputArtifact bool `mapstructure:"keep_input_artifact"` Config map[string]interface{} } @@ -79,3 +79,11 @@ func (b *Builder) GoString() string { func (p *Provisioner) GoString() string { return fmt.Sprintf("*%#v", *p) } + +func (p *PostProcessor) GoString() string { + return fmt.Sprintf("*%#v", *p) +} + +func (v *Variable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} diff --git a/template/test-fixtures/parse-pp-basic.json b/template/test-fixtures/parse-pp-basic.json new file mode 100644 index 000000000..56a1145e6 --- /dev/null +++ b/template/test-fixtures/parse-pp-basic.json @@ -0,0 +1,6 @@ +{ + "post-processors": [{ + "type": "foo", + "foo": "bar" + }] +} diff --git a/template/test-fixtures/parse-pp-keep.json b/template/test-fixtures/parse-pp-keep.json new file mode 100644 index 000000000..d0bd513e5 --- /dev/null +++ b/template/test-fixtures/parse-pp-keep.json @@ -0,0 +1,6 @@ +{ + "post-processors": [{ + "type": "foo", + "keep_input_artifact": true + }] +} diff --git a/template/test-fixtures/parse-pp-map.json b/template/test-fixtures/parse-pp-map.json new file mode 100644 index 000000000..43cc6abb8 --- /dev/null +++ b/template/test-fixtures/parse-pp-map.json @@ -0,0 +1,5 @@ +{ + "post-processors": [{ + "type": "foo" + }] +} diff --git a/template/test-fixtures/parse-pp-multi.json b/template/test-fixtures/parse-pp-multi.json new file mode 100644 index 000000000..32a60fa34 --- /dev/null +++ b/template/test-fixtures/parse-pp-multi.json @@ -0,0 +1,5 @@ +{ + "post-processors": [[{ + "type": "foo" + }, "bar"]] +} diff --git a/template/test-fixtures/parse-pp-no-type.json b/template/test-fixtures/parse-pp-no-type.json new file 
mode 100644 index 000000000..f4dda63e8 --- /dev/null +++ b/template/test-fixtures/parse-pp-no-type.json @@ -0,0 +1,5 @@ +{ + "post-processors": [{ + "keep_input_artifact": true + }] +} diff --git a/template/test-fixtures/parse-pp-slice.json b/template/test-fixtures/parse-pp-slice.json new file mode 100644 index 000000000..94c3a5247 --- /dev/null +++ b/template/test-fixtures/parse-pp-slice.json @@ -0,0 +1,5 @@ +{ + "post-processors": [{ + "type": "foo" + }, "bar"] +} diff --git a/template/test-fixtures/parse-pp-string.json b/template/test-fixtures/parse-pp-string.json new file mode 100644 index 000000000..8e77358ea --- /dev/null +++ b/template/test-fixtures/parse-pp-string.json @@ -0,0 +1,3 @@ +{ + "post-processors": ["foo"] +} From 43fbd26dc91a4d78897e3b8845b5b2e00937b47e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 13:34:44 -0600 Subject: [PATCH 083/956] template: copy some description and min version --- template/parse.go | 4 ++++ template/parse_test.go | 16 ++++++++++++++ template/test-fixtures/parse-description.json | 3 +++ template/test-fixtures/parse-min-version.json | 3 +++ 4 files changed, 26 insertions(+) create mode 100644 template/test-fixtures/parse-description.json create mode 100644 template/test-fixtures/parse-min-version.json diff --git a/template/parse.go b/template/parse.go index f9dc7cba3..df29e8af0 100644 --- a/template/parse.go +++ b/template/parse.go @@ -30,6 +30,10 @@ func (r *rawTemplate) Template() (*Template, error) { var result Template var errs error + // Copy some literals + result.Description = r.Description + result.MinVersion = r.MinVersion + // Gather the variables if len(r.Variables) > 0 { result.Variables = make(map[string]*Variable, len(r.Variables)) diff --git a/template/parse_test.go b/template/parse_test.go index b28670e40..3e6847604 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -243,6 +243,22 @@ func TestParse(t *testing.T) { nil, true, }, + + { + "parse-description.json", + &Template{ + Description: "foo", + }, + false, + }, + + { + "parse-min-version.json", + &Template{ + MinVersion: "1.2", + }, + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/parse-description.json b/template/test-fixtures/parse-description.json new file mode 100644 index 000000000..c72a24eb8 --- /dev/null +++ b/template/test-fixtures/parse-description.json @@ -0,0 +1,3 @@ +{ + "description": "foo" +} diff --git a/template/test-fixtures/parse-min-version.json b/template/test-fixtures/parse-min-version.json new file mode 100644 index 000000000..f98101efb --- /dev/null +++ b/template/test-fixtures/parse-min-version.json @@ -0,0 +1,3 @@ +{ + "min_packer_version": "1.2" +} From 2e4dd639124ce787a59f4e7dd915f09810cc85a4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 14:44:29 -0600 Subject: [PATCH 084/956] template: parse push --- template/parse.go | 11 +++++++++++ template/parse_test.go | 10 ++++++++++ template/test-fixtures/parse-push.json | 5 +++++ 3 files changed, 26 insertions(+) create mode 100644 template/test-fixtures/parse-push.json diff --git a/template/parse.go b/template/parse.go index df29e8af0..c0e21b1c4 100644 --- a/template/parse.go +++ b/template/parse.go @@ -174,6 +174,17 @@ func (r *rawTemplate) Template() (*Template, error) { result.Provisioners = append(result.Provisioners, &p) } + // Push + if len(r.Push) > 0 { + var p Push + if err := r.decoder(&p, nil).Decode(r.Push); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "push: %s", err)) + } +
result.Push = &p + } + // If we have errors, return those with a nil result if errs != nil { return nil, errs diff --git a/template/parse_test.go b/template/parse_test.go index 3e6847604..023c3d537 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -259,6 +259,16 @@ func TestParse(t *testing.T) { }, false, }, + + { + "parse-push.json", + &Template{ + Push: &Push{ + Name: "foo", + }, + }, + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/parse-push.json b/template/test-fixtures/parse-push.json new file mode 100644 index 000000000..2529eedc4 --- /dev/null +++ b/template/test-fixtures/parse-push.json @@ -0,0 +1,5 @@ +{ + "push": { + "name": "foo" + } +} From 2f7e95cc462f6b6bfc74651e80b967cedd6a184a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 15:29:45 -0600 Subject: [PATCH 085/956] template: Validate --- template/template.go | 35 ++++++++++++++++ template/template_test.go | 42 +++++++++++++++++++ .../test-fixtures/validate-bad-override.json | 12 ++++++ .../test-fixtures/validate-good-override.json | 12 ++++++ .../test-fixtures/validate-no-builders.json | 1 + 5 files changed, 102 insertions(+) create mode 100644 template/test-fixtures/validate-bad-override.json create mode 100644 template/test-fixtures/validate-good-override.json create mode 100644 template/test-fixtures/validate-no-builders.json diff --git a/template/template.go b/template/template.go index 8de30b1a9..0ea2a4eb7 100644 --- a/template/template.go +++ b/template/template.go @@ -1,8 +1,11 @@ package template import ( + "errors" "fmt" "time" + + "github.com/hashicorp/go-multierror" ) // Template represents the parsed template that is used to configure @@ -68,6 +71,38 @@ type OnlyExcept struct { Except []string } +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- + +// Validate does some basic validation of the template on top of the +// validation that occurs while parsing. If possible, we try to defer +// validation to here. The validation errors that occur during parsing +// are the minimal necessary to make sure parsing builds a reasonable +// Template structure. 
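The accumulate-then-return shape that Validate uses below can be seen in isolation in this sketch; the two rules are toy stand-ins for the real checks, and a nil error falls out only when nothing was ever appended.

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func validate(builders map[string]bool, overrides []string) error {
	var err error
	if len(builders) == 0 {
		err = multierror.Append(err, fmt.Errorf(
			"at least one builder must be defined"))
	}
	for _, name := range overrides {
		if !builders[name] {
			err = multierror.Append(err, fmt.Errorf(
				"override '%s' doesn't exist", name))
		}
	}
	return err // nil when nothing was appended
}

func main() {
	fmt.Println(validate(map[string]bool{"foo": true}, []string{"foo"})) // <nil>
	fmt.Println(validate(nil, []string{"bar"}))                          // two accumulated errors
}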
+func (t *Template) Validate() error { + var err error + + // At least one builder must be defined + if len(t.Builders) == 0 { + err = multierror.Append(err, errors.New( + "at least one builder must be defined")) + } + + // Verify that the provisioner overrides target builders that exist + for i, p := range t.Provisioners { + for name, _ := range p.Override { + if _, ok := t.Builders[name]; !ok { + err = multierror.Append(err, fmt.Errorf( + "provisioner %d: override '%s' doesn't exist", + i+1, name)) + } + } + } + + return err +} + //------------------------------------------------------------------- // GoStringer //------------------------------------------------------------------- diff --git a/template/template_test.go b/template/template_test.go index 2847bf9a2..e8a0ff2fe 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -1,7 +1,9 @@ package template import ( + "os" "path/filepath" + "testing" ) const FixturesDir = "./test-fixtures" @@ -10,3 +12,43 @@ const FixturesDir = "./test-fixtures" func fixtureDir(n string) string { return filepath.Join(FixturesDir, n) } + +func TestTemplateValidate(t *testing.T) { + cases := []struct { + File string + Err bool + }{ + { + "validate-no-builders.json", + true, + }, + + { + "validate-bad-override.json", + true, + }, + + { + "validate-good-override.json", + false, + }, + } + + for _, tc := range cases { + f, err := os.Open(fixtureDir(tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } + + tpl, err := Parse(f) + f.Close() + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + err = tpl.Validate() + if (err != nil) != tc.Err { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + } +} diff --git a/template/test-fixtures/validate-bad-override.json b/template/test-fixtures/validate-bad-override.json new file mode 100644 index 000000000..7f6c64588 --- /dev/null +++ b/template/test-fixtures/validate-bad-override.json @@ -0,0 +1,12 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "override": { + "bar": {} + } + }] +} diff --git a/template/test-fixtures/validate-good-override.json b/template/test-fixtures/validate-good-override.json new file mode 100644 index 000000000..4d7e0f757 --- /dev/null +++ b/template/test-fixtures/validate-good-override.json @@ -0,0 +1,12 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "override": { + "foo": {} + } + }] +} diff --git a/template/test-fixtures/validate-no-builders.json b/template/test-fixtures/validate-no-builders.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/template/test-fixtures/validate-no-builders.json @@ -0,0 +1 @@ +{} From 637fabc1c7684b1d0ad37a9cc95636bff7f51398 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 15:39:32 -0600 Subject: [PATCH 086/956] template: validate only/except --- template/template.go | 32 +++++++++++++++++++ template/template_test.go | 20 ++++++++++++ .../validate-bad-prov-except.json | 10 ++++++ .../test-fixtures/validate-bad-prov-only.json | 10 ++++++ .../validate-good-prov-except.json | 10 ++++++ .../validate-good-prov-only.json | 10 ++++++ 6 files changed, 92 insertions(+) create mode 100644 template/test-fixtures/validate-bad-prov-except.json create mode 100644 template/test-fixtures/validate-bad-prov-only.json create mode 100644 template/test-fixtures/validate-good-prov-except.json create mode 100644 template/test-fixtures/validate-good-prov-only.json diff --git a/template/template.go b/template/template.go index 
0ea2a4eb7..0d2671ca0 100644 --- a/template/template.go +++ b/template/template.go @@ -91,6 +91,15 @@ func (t *Template) Validate() error { // Verify that the provisioner overrides target builders that exist for i, p := range t.Provisioners { + // Validate only/except + if verr := p.OnlyExcept.Validate(t); verr != nil { + for _, e := range multierror.Append(verr).Errors { + err = multierror.Append(err, fmt.Errorf( + "provisioner %d: %s", i+1, e)) + } + } + + // Validate overrides for name, _ := range p.Override { if _, ok := t.Builders[name]; !ok { err = multierror.Append(err, fmt.Errorf( @@ -103,6 +112,29 @@ func (t *Template) Validate() error { return err } +// Validate validates that the OnlyExcept settings are correct for a thing. +func (o *OnlyExcept) Validate(t *Template) error { + if len(o.Only) > 0 && len(o.Except) > 0 { + return errors.New("only one of 'only' or 'except' may be specified") + } + + var err error + for _, n := range o.Only { + if _, ok := t.Builders[n]; !ok { + err = multierror.Append(err, fmt.Errorf( + "'only' specified builder '%s' not found", n)) + } + } + for _, n := range o.Except { + if _, ok := t.Builders[n]; !ok { + err = multierror.Append(err, fmt.Errorf( + "'except' specified builder '%s' not found", n)) + } + } + + return err +} + //------------------------------------------------------------------- // GoStringer //------------------------------------------------------------------- diff --git a/template/template_test.go b/template/template_test.go index e8a0ff2fe..dbb2cf33d 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -32,6 +32,26 @@ func TestTemplateValidate(t *testing.T) { "validate-good-override.json", false, }, + + { + "validate-bad-prov-only.json", + true, + }, + + { + "validate-good-prov-only.json", + false, + }, + + { + "validate-bad-prov-except.json", + true, + }, + + { + "validate-good-prov-except.json", + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/validate-bad-prov-except.json b/template/test-fixtures/validate-bad-prov-except.json new file mode 100644 index 000000000..0a24bf58e --- /dev/null +++ b/template/test-fixtures/validate-bad-prov-except.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "except": ["bar"] + }] +} diff --git a/template/test-fixtures/validate-bad-prov-only.json b/template/test-fixtures/validate-bad-prov-only.json new file mode 100644 index 000000000..fa8c9ccab --- /dev/null +++ b/template/test-fixtures/validate-bad-prov-only.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "only": ["bar"] + }] +} diff --git a/template/test-fixtures/validate-good-prov-except.json b/template/test-fixtures/validate-good-prov-except.json new file mode 100644 index 000000000..e075d09ca --- /dev/null +++ b/template/test-fixtures/validate-good-prov-except.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "except": ["foo"] + }] +} diff --git a/template/test-fixtures/validate-good-prov-only.json b/template/test-fixtures/validate-good-prov-only.json new file mode 100644 index 000000000..db162fa28 --- /dev/null +++ b/template/test-fixtures/validate-good-prov-only.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "provisioners": [{ + "type": "bar", + "only": ["foo"] + }] +} From 28dc1c2aedc5271175dc17b178bdd4dd6162ba9c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 21 May 2015 15:42:12 -0600 
Subject: [PATCH 087/956] template: validate post-processor only/except --- template/template.go | 13 ++++++++++++ template/template_test.go | 20 +++++++++++++++++++ .../test-fixtures/validate-bad-pp-except.json | 10 ++++++++++ .../test-fixtures/validate-bad-pp-only.json | 10 ++++++++++ .../validate-good-pp-except.json | 10 ++++++++++ .../test-fixtures/validate-good-pp-only.json | 10 ++++++++++ 6 files changed, 73 insertions(+) create mode 100644 template/test-fixtures/validate-bad-pp-except.json create mode 100644 template/test-fixtures/validate-bad-pp-only.json create mode 100644 template/test-fixtures/validate-good-pp-except.json create mode 100644 template/test-fixtures/validate-good-pp-only.json diff --git a/template/template.go b/template/template.go index 0d2671ca0..17d808029 100644 --- a/template/template.go +++ b/template/template.go @@ -109,6 +109,19 @@ func (t *Template) Validate() error { } } + // Verify post-processors + for i, chain := range t.PostProcessors { + for j, p := range chain { + // Validate only/except + if verr := p.OnlyExcept.Validate(t); verr != nil { + for _, e := range multierror.Append(verr).Errors { + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: %s", i+1, j+1, e)) + } + } + } + } + return err } diff --git a/template/template_test.go b/template/template_test.go index dbb2cf33d..d14682728 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -52,6 +52,26 @@ func TestTemplateValidate(t *testing.T) { "validate-good-prov-except.json", false, }, + + { + "validate-bad-pp-only.json", + true, + }, + + { + "validate-good-pp-only.json", + false, + }, + + { + "validate-bad-pp-except.json", + true, + }, + + { + "validate-good-pp-except.json", + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/validate-bad-pp-except.json b/template/test-fixtures/validate-bad-pp-except.json new file mode 100644 index 000000000..3f66cd8ca --- /dev/null +++ b/template/test-fixtures/validate-bad-pp-except.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "post-processors": [{ + "type": "bar", + "except": ["bar"] + }] +} diff --git a/template/test-fixtures/validate-bad-pp-only.json b/template/test-fixtures/validate-bad-pp-only.json new file mode 100644 index 000000000..a79edcb80 --- /dev/null +++ b/template/test-fixtures/validate-bad-pp-only.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "post-processors": [{ + "type": "bar", + "only": ["bar"] + }] +} diff --git a/template/test-fixtures/validate-good-pp-except.json b/template/test-fixtures/validate-good-pp-except.json new file mode 100644 index 000000000..79a1b2a24 --- /dev/null +++ b/template/test-fixtures/validate-good-pp-except.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "post-processors": [{ + "type": "bar", + "except": ["foo"] + }] +} diff --git a/template/test-fixtures/validate-good-pp-only.json b/template/test-fixtures/validate-good-pp-only.json new file mode 100644 index 000000000..24ef7c95d --- /dev/null +++ b/template/test-fixtures/validate-good-pp-only.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "foo" + }], + + "post-processors": [{ + "type": "bar", + "only": ["foo"] + }] +} From bda4ef7c6556fa8767dba0f542f10c16d9d6a89d Mon Sep 17 00:00:00 2001 From: lokulin Date: Fri, 22 May 2015 15:08:51 +1000 Subject: [PATCH 088/956] Retry the AWS API when looking for a newly created instance Sometimes the AWS API responds that it can't find a newly created instance if you poll it too soon after creation. 
Retry a few times to be sure it really hasn't been created. --- builder/amazon/common/step_run_source_instance.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 50cedf6ea..545e7765d 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -195,7 +195,16 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi instanceId = spotResp.SpotRequestResults[0].InstanceId } - instanceResp, err := ec2conn.Instances([]string{instanceId}, nil) + instanceResp, err := ec2conn.Instances([]string{instanceId}, nil) + for i := 0; i < 10; i++ { + if err == nil { + break + } + // Give the API a moment to catch up before asking again. + time.Sleep(3 * time.Second) + instanceResp, err = ec2conn.Instances([]string{instanceId}, nil) + } + if err != nil { err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err) state.Put("error", err) From ded13a8b10b4cbe53a75c9eb968e099fc79a524b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 14:48:07 -0700 Subject: [PATCH 089/956] packer: Core, and template validate --- packer/core.go | 77 +++++++++++++++++++ packer/core_test.go | 60 +++++++++++++++ packer/packer_test.go | 11 +++ .../test-fixtures/validate-dup-builder.json | 10 +++ .../test-fixtures/validate-req-variable.json | 9 +++ 5 files changed, 167 insertions(+) create mode 100644 packer/core.go create mode 100644 packer/core_test.go create mode 100644 packer/packer_test.go create mode 100644 packer/test-fixtures/validate-dup-builder.json create mode 100644 packer/test-fixtures/validate-req-variable.json diff --git a/packer/core.go b/packer/core.go new file mode 100644 index 000000000..de6fe4169 --- /dev/null +++ b/packer/core.go @@ -0,0 +1,77 @@ +package packer + +import ( + "fmt" + "os" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/packer/template" +) + +// Core is the main executor of Packer. If Packer is being used as a +// library, this is the struct you'll want to instantiate to get anything done. +type Core struct { + cache Cache + components ComponentFinder + ui Ui + template *template.Template + variables map[string]string +} + +// CoreConfig is the structure for initializing a new Core. Once a CoreConfig +// is used to initialize a Core, it shouldn't be re-used or modified again. +type CoreConfig struct { + Cache Cache + Components ComponentFinder + Ui Ui + Template *template.Template + Variables map[string]string +} + +// NewCore creates a new Core. +func NewCore(c *CoreConfig) (*Core, error) { + if c.Ui == nil { + c.Ui = &BasicUi{ + Reader: os.Stdin, + Writer: os.Stdout, + ErrorWriter: os.Stdout, + } + } + + return &Core{ + cache: c.Cache, + components: c.Components, + ui: c.Ui, + template: c.Template, + variables: c.Variables, + }, nil +} + +// Validate does a full validation of the template. +// +// This will automatically call template.Validate() in addition to doing +// richer semantic checks around variables and so on. +func (c *Core) Validate() error { + // First validate the template in general; we can't do anything else + // unless the template itself is valid.
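Returning to the AWS retry a few patches up, its intended shape is easier to see standalone. Here describe is a stand-in for the eventually consistent API call, and the attempt count and sleep are illustrative (shortened from the real 3-second backoff so the sketch runs quickly):

package main

import (
	"errors"
	"fmt"
	"time"
)

var calls int

// describe stands in for the eventually-consistent instance lookup.
func describe() (string, error) {
	calls++
	if calls < 3 {
		return "", errors.New("instance not found")
	}
	return "i-12345678", nil
}

func main() {
	id, err := describe()
	for i := 0; i < 10 && err != nil; i++ {
		// Give the API a moment to catch up before asking again.
		time.Sleep(200 * time.Millisecond)
		id, err = describe()
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // i-12345678, after two retries
}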
+ if err := c.template.Validate(); err != nil { + return err + } + + // Validate variables are set + var err error + for n, v := range c.template.Variables { + if v.Required { + if _, ok := c.variables[n]; !ok { + err = multierror.Append(err, fmt.Errorf( + "required variable not set: %s", n)) + } + } + } + + // TODO: validate all builders exist + // TODO: ^^ provisioner + // TODO: ^^ post-processor + + return err +} diff --git a/packer/core_test.go b/packer/core_test.go new file mode 100644 index 000000000..dc7880302 --- /dev/null +++ b/packer/core_test.go @@ -0,0 +1,60 @@ +package packer + +import ( + "os" + "testing" + + "github.com/mitchellh/packer/template" +) + +func TestCoreValidate(t *testing.T) { + cases := []struct { + File string + Vars map[string]string + Err bool + }{ + { + "validate-dup-builder.json", + nil, + true, + }, + + // Required variable not set + { + "validate-req-variable.json", + nil, + true, + }, + + { + "validate-req-variable.json", + map[string]string{"foo": "bar"}, + false, + }, + } + + for _, tc := range cases { + f, err := os.Open(fixtureDir(tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } + + tpl, err := template.Parse(f) + f.Close() + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + core, err := NewCore(&CoreConfig{ + Template: tpl, + Variables: tc.Vars, + }) + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + if err := core.Validate(); (err != nil) != tc.Err { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + } +} diff --git a/packer/packer_test.go b/packer/packer_test.go new file mode 100644 index 000000000..ec536e2c3 --- /dev/null +++ b/packer/packer_test.go @@ -0,0 +1,11 @@ +package packer + +import ( + "path/filepath" +) + +const FixtureDir = "./test-fixtures" + +func fixtureDir(n string) string { + return filepath.Join(FixtureDir, n) +} diff --git a/packer/test-fixtures/validate-dup-builder.json b/packer/test-fixtures/validate-dup-builder.json new file mode 100644 index 000000000..21d206d35 --- /dev/null +++ b/packer/test-fixtures/validate-dup-builder.json @@ -0,0 +1,10 @@ +{ + "builders": [ + {"type": "foo"} + ], + + "provisioners": [{ + "type": "foo", + "only": ["bar"] + }] +} diff --git a/packer/test-fixtures/validate-req-variable.json b/packer/test-fixtures/validate-req-variable.json new file mode 100644 index 000000000..796d0b669 --- /dev/null +++ b/packer/test-fixtures/validate-req-variable.json @@ -0,0 +1,9 @@ +{ + "variables": { + "foo": null + }, + + "builders": [{ + "type": "foo" + }] +} From d74dacc4c02cd1808f71930fa5fac05e5f1c35e6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 15:08:50 -0700 Subject: [PATCH 090/956] packer: Core.Build --- packer/core.go | 29 +++++++++++++++++++++++++++++ packer/testing.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 packer/testing.go diff --git a/packer/core.go b/packer/core.go index de6fe4169..caa18a196 100644 --- a/packer/core.go +++ b/packer/core.go @@ -47,6 +47,35 @@ func NewCore(c *CoreConfig) (*Core, error) { }, nil } +// Build returns the Build object for the given name. 
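The required-variable check above, reduced to a standalone sketch (the variable set is a toy example):

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// requiredVars marks which template variables lack a default, mirroring
// the Required flag checked in Core.Validate above.
var requiredVars = map[string]bool{"foo": true}

func checkVariables(provided map[string]string) error {
	var err error
	for name, required := range requiredVars {
		if !required {
			continue
		}
		if _, ok := provided[name]; !ok {
			err = multierror.Append(err, fmt.Errorf(
				"required variable not set: %s", name))
		}
	}
	return err
}

func main() {
	fmt.Println(checkVariables(nil))                             // one error: foo is missing
	fmt.Println(checkVariables(map[string]string{"foo": "bar"})) // <nil>
}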
+func (c *Core) Build(n string) (Build, error) { + // Setup the builder + configBuilder, ok := c.template.Builders[n] + if !ok { + return nil, fmt.Errorf("no such build found: %s", n) + } + builder, err := c.components.Builder(configBuilder.Type) + if err != nil { + return nil, fmt.Errorf( + "error initializing builder '%s': %s", + configBuilder.Type, err) + } + if builder == nil { + return nil, fmt.Errorf( + "builder type not found: %s", configBuilder.Type) + } + + // TODO: template process name + + return &coreBuild{ + name: n, + builder: builder, + builderConfig: configBuilder.Config, + builderType: configBuilder.Type, + variables: c.variables, + }, nil +} + // Validate does a full validation of the template. // // This will automatically call template.Validate() in addition to doing diff --git a/packer/testing.go b/packer/testing.go new file mode 100644 index 000000000..099119180 --- /dev/null +++ b/packer/testing.go @@ -0,0 +1,44 @@ +package packer + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestCoreConfig(t *testing.T) *CoreConfig { + // Create a UI that is effectively /dev/null everywhere + var buf bytes.Buffer + ui := &BasicUi{ + Reader: &buf, + Writer: ioutil.Discard, + ErrorWriter: ioutil.Discard, + } + + // Create some test components + components := ComponentFinder{ + Builder: func(n string) (Builder, error) { + if n != "test" { + return nil, nil + } + + return &MockBuilder{}, nil + }, + } + + return &CoreConfig{ + Cache: &FileCache{CacheDir: os.TempDir()}, + Components: components, + Ui: ui, + } +} + +func TestCore(t *testing.T, c *CoreConfig) *Core { + core, err := NewCore(c) + if err != nil { + t.Fatalf("err: %s", err) + } + + return core +} From 97a48e35bb4bb579843833a8f118124c2cf2c110 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 15:44:54 -0700 Subject: [PATCH 091/956] template: ParseFile --- template/parse.go | 13 +++++++++++++ template/parse_test.go | 9 +-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/template/parse.go b/template/parse.go index c0e21b1c4..a46adc594 100644 --- a/template/parse.go +++ b/template/parse.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io" + "os" "sort" "github.com/hashicorp/go-multierror" @@ -290,3 +291,15 @@ func Parse(r io.Reader) (*Template, error) { // Return the template parsed from the raw structure return rawTpl.Template() } + +// ParseFile is the same as Parse but is a helper to automatically open +// a file for parsing. 
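+//
+// For example (the path is illustrative):
+//
+//	tpl, err := template.ParseFile("packer.json")
+//	if err != nil {
+//		// malformed JSON or an invalid template structure
+//	}
+//
+// The file handle is closed before returning, so callers only manage
+// the resulting template.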
+func ParseFile(path string) (*Template, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + return Parse(f) +} diff --git a/template/parse_test.go b/template/parse_test.go index 023c3d537..2cca68b88 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -1,7 +1,6 @@ package template import ( - "os" "reflect" "testing" "time" @@ -272,13 +271,7 @@ func TestParse(t *testing.T) { } for _, tc := range cases { - f, err := os.Open(fixtureDir(tc.File)) - if err != nil { - t.Fatalf("err: %s", err) - } - - tpl, err := Parse(f) - f.Close() + tpl, err := ParseFile(fixtureDir(tc.File)) if (err != nil) != tc.Err { t.Fatalf("err: %s", err) } From 47b570a2d2510e007afeb222c6724fe161540b96 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 16:06:11 -0700 Subject: [PATCH 092/956] template/interpolate: flip disable to enableenv --- template/interpolate/funcs.go | 2 +- template/interpolate/funcs_test.go | 4 ++-- template/interpolate/i.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index 68592e046..1ddbbe167 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -51,7 +51,7 @@ func Funcs(ctx *Context) template.FuncMap { func funcGenEnv(ctx *Context) interface{} { return func(k string) (string, error) { - if ctx.DisableEnv { + if !ctx.EnableEnv { // The error message doesn't have to be that detailed since // semantic checks should catch this. return "", errors.New("env vars are not allowed here") diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index 7afa53447..aad05d376 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -26,7 +26,7 @@ func TestFuncEnv(t *testing.T) { os.Setenv("PACKER_TEST_ENV", "foo") defer os.Setenv("PACKER_TEST_ENV", "") - ctx := &Context{} + ctx := &Context{EnableEnv: true} for _, tc := range cases { i := &I{Value: tc.Input} result, err := i.Render(ctx) @@ -53,7 +53,7 @@ func TestFuncEnv_disable(t *testing.T) { }, } - ctx := &Context{DisableEnv: true} + ctx := &Context{EnableEnv: false} for _, tc := range cases { i := &I{Value: tc.Input} result, err := i.Render(ctx) diff --git a/template/interpolate/i.go b/template/interpolate/i.go index 1033ad86a..5f70ed82a 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -15,8 +15,8 @@ type Context struct { // "user" function reads from. 
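	// For example, a template string such as {{user `name`}} resolves
	// against UserVariables["name"] when rendered (key illustrative).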
 	UserVariables map[string]string
 
-	// DisableEnv disables the env function
-	DisableEnv bool
+	// EnableEnv enables the env function
+	EnableEnv bool
 }
 
 // I stands for "interpolation" and is the main interpolation struct

From 3ebfe06ec8dcfbe2da1d7d7fde02d9fd4c61193d Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 23 May 2015 16:12:32 -0700
Subject: [PATCH 093/956] packer: render build names

---
 packer/core.go                              | 31 ++++++++++++++++
 packer/core_test.go                         | 41 +++++++++++++++++++++
 packer/test-fixtures/build-names-basic.json |  5 +++
 packer/test-fixtures/build-names-func.json  |  5 +++
 template/interpolate/i.go                   |  5 +++
 5 files changed, 87 insertions(+)
 create mode 100644 packer/test-fixtures/build-names-basic.json
 create mode 100644 packer/test-fixtures/build-names-func.json

diff --git a/packer/core.go b/packer/core.go
index caa18a196..21ea4b8f9 100644
--- a/packer/core.go
+++ b/packer/core.go
@@ -3,9 +3,11 @@ package packer
 import (
 	"fmt"
 	"os"
+	"sort"
 
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/packer/template"
+	"github.com/mitchellh/packer/template/interpolate"
 )
 
 // Core is the main executor of Packer. If Packer is being used as a
@@ -16,6 +18,7 @@ type Core struct {
 	ui         Ui
 	template   *template.Template
 	variables  map[string]string
+	builds     map[string]*template.Builder
 }
 
 // CoreConfig is the structure for initializing a new Core. Once a CoreConfig
@@ -38,15 +41,43 @@ func NewCore(c *CoreConfig) (*Core, error) {
 		}
 	}
 
+	// Go through and interpolate all the build names. We should be able
+	// to do this at this point with the variables.
+	builds := make(map[string]*template.Builder)
+	for _, b := range c.Template.Builders {
+		v, err := interpolate.Render(b.Name, &interpolate.Context{
+			UserVariables: c.Variables,
+		})
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error interpolating builder '%s': %s",
+				b.Name, err)
+		}
+
+		builds[v] = b
+	}
+
 	return &Core{
 		cache:      c.Cache,
 		components: c.Components,
 		ui:         c.Ui,
 		template:   c.Template,
 		variables:  c.Variables,
+		builds:     builds,
 	}, nil
}
 
+// BuildNames returns the builds that are available in this configured core.
+func (c *Core) BuildNames() []string {
+	r := make([]string, 0, len(c.builds))
+	for n := range c.builds {
+		r = append(r, n)
+	}
+	sort.Strings(r)
+
+	return r
+}
+
 // Build returns the Build object for the given name.
func (c *Core) Build(n string) (Build, error) { // Setup the builder diff --git a/packer/core_test.go b/packer/core_test.go index dc7880302..d3f338d12 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -2,11 +2,52 @@ package packer import ( "os" + "reflect" "testing" "github.com/mitchellh/packer/template" ) +func TestCoreBuildNames(t *testing.T) { + cases := []struct { + File string + Vars map[string]string + Result []string + }{ + { + "build-names-basic.json", + nil, + []string{"something"}, + }, + + { + "build-names-func.json", + nil, + []string{"TUBES"}, + }, + } + + for _, tc := range cases { + tpl, err := template.ParseFile(fixtureDir(tc.File)) + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + core, err := NewCore(&CoreConfig{ + Template: tpl, + Variables: tc.Vars, + }) + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + names := core.BuildNames() + if !reflect.DeepEqual(names, tc.Result) { + t.Fatalf("err: %s\n\n%#v", tc.File, names) + } + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/packer/test-fixtures/build-names-basic.json b/packer/test-fixtures/build-names-basic.json new file mode 100644 index 000000000..1b0162551 --- /dev/null +++ b/packer/test-fixtures/build-names-basic.json @@ -0,0 +1,5 @@ +{ + "builders": [ + {"type": "something"} + ] +} diff --git a/packer/test-fixtures/build-names-func.json b/packer/test-fixtures/build-names-func.json new file mode 100644 index 000000000..feb28cf37 --- /dev/null +++ b/packer/test-fixtures/build-names-func.json @@ -0,0 +1,5 @@ +{ + "builders": [ + {"type": "{{upper `tubes`}}"} + ] +} diff --git a/template/interpolate/i.go b/template/interpolate/i.go index 5f70ed82a..d52653fcf 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -19,6 +19,11 @@ type Context struct { EnableEnv bool } +// Render is shorthand for constructing an I and calling Render. +func Render(v string, ctx *Context) (string, error) { + return (&I{Value: v}).Render(ctx) +} + // I stands for "interpolation" and is the main interpolation struct // in order to render values. 
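//
// Typical use goes through the Render shorthand above. For example,
// mirroring the build-names-func test fixture:
//
//	out, err := interpolate.Render("{{upper `tubes`}}", &interpolate.Context{})
//	// out == "TUBES" when err is nil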
type I struct { From 9d89ca8e07be91d81a6c3365011cb412d226c2bd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 16:30:45 -0700 Subject: [PATCH 094/956] command: build should be converted to new API, compiles --- TODO.txt | 2 + command/build.go | 114 +++++++++++--------------- command/command_test.go | 18 ++-- command/meta.go | 145 ++++++++++++++++++++++++++++++++- command/push.go | 2 +- command/version.go | 6 +- commands.go | 5 +- config.go | 3 + helper/flag-kv/flag.go | 29 +++++++ helper/flag-kv/flag_test.go | 56 +++++++++++++ helper/flag-slice/flag.go | 16 ++++ helper/flag-slice/flag_test.go | 33 ++++++++ main.go | 7 ++ 13 files changed, 355 insertions(+), 81 deletions(-) create mode 100644 TODO.txt create mode 100644 helper/flag-kv/flag.go create mode 100644 helper/flag-kv/flag_test.go create mode 100644 helper/flag-slice/flag.go create mode 100644 helper/flag-slice/flag_test.go diff --git a/TODO.txt b/TODO.txt new file mode 100644 index 000000000..031ec3ae3 --- /dev/null +++ b/TODO.txt @@ -0,0 +1,2 @@ +- var-file doesn't work +- prov/post-processors/hooks don't work diff --git a/command/build.go b/command/build.go index a0b33e530..ec6f70555 100644 --- a/command/build.go +++ b/command/build.go @@ -2,16 +2,16 @@ package command import ( "bytes" - "flag" "fmt" - cmdcommon "github.com/mitchellh/packer/common/command" - "github.com/mitchellh/packer/packer" "log" "os" "os/signal" "strconv" "strings" "sync" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" ) type BuildCommand struct { @@ -20,71 +20,52 @@ type BuildCommand struct { func (c BuildCommand) Run(args []string) int { var cfgColor, cfgDebug, cfgForce, cfgParallel bool - buildOptions := new(cmdcommon.BuildOptions) - - env, err := c.Meta.Environment() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing environment: %s", err)) + flags := c.Meta.FlagSet("build", FlagSetBuildFilter|FlagSetVars) + flags.Usage = func() { c.Ui.Say(c.Help()) } + flags.BoolVar(&cfgColor, "color", true, "") + flags.BoolVar(&cfgDebug, "debug", false, "") + flags.BoolVar(&cfgForce, "force", false, "") + flags.BoolVar(&cfgParallel, "parallel", true, "") + if err := flags.Parse(args); err != nil { return 1 } - cmdFlags := flag.NewFlagSet("build", flag.ContinueOnError) - cmdFlags.Usage = func() { env.Ui().Say(c.Help()) } - cmdFlags.BoolVar(&cfgColor, "color", true, "enable or disable color") - cmdFlags.BoolVar(&cfgDebug, "debug", false, "debug mode for builds") - cmdFlags.BoolVar(&cfgForce, "force", false, "force a build if artifacts exist") - cmdFlags.BoolVar(&cfgParallel, "parallel", true, "enable/disable parallelization") - cmdcommon.BuildOptionFlags(cmdFlags, buildOptions) - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() + args = flags.Args() if len(args) != 1 { - cmdFlags.Usage() + flags.Usage() return 1 } - if err := buildOptions.Validate(); err != nil { - env.Ui().Error(err.Error()) - env.Ui().Error("") - env.Ui().Error(c.Help()) - return 1 - } - - userVars, err := buildOptions.AllUserVars() + // Parse the template + tpl, err := template.ParseFile(args[0]) if err != nil { - env.Ui().Error(fmt.Sprintf("Error compiling user variables: %s", err)) - env.Ui().Error("") - env.Ui().Error(c.Help()) + c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } - // Read the file into a byte array so that we can parse the template - log.Printf("Reading template: %s", args[0]) - tpl, err := packer.ParseTemplateFile(args[0], userVars) + // Get the core + 
core, err := c.Meta.Core(tpl) if err != nil { - env.Ui().Error(fmt.Sprintf("Failed to parse template: %s", err)) + c.Ui.Error(err.Error()) return 1 } - // The component finder for our builds - components := &packer.ComponentFinder{ - Builder: env.Builder, - Hook: env.Hook, - PostProcessor: env.PostProcessor, - Provisioner: env.Provisioner, - } + // Get the builds we care about + buildNames := c.Meta.BuildNames(core) + builds := make([]packer.Build, 0, len(buildNames)) + for _, n := range buildNames { + b, err := core.Build(n) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "Failed to initialize build '%s': %s", + n, err)) + } - // Go through each builder and compile the builds that we care about - builds, err := buildOptions.Builds(tpl, components) - if err != nil { - env.Ui().Error(err.Error()) - return 1 + builds = append(builds, b) } if cfgDebug { - env.Ui().Say("Debug mode enabled. Builds will not be parallelized.") + c.Ui.Say("Debug mode enabled. Builds will not be parallelized.") } // Compile all the UIs for the builds @@ -95,24 +76,23 @@ func (c BuildCommand) Run(args []string) int { packer.UiColorYellow, packer.UiColorBlue, } - buildUis := make(map[string]packer.Ui) - for i, b := range builds { + for i, b := range buildNames { var ui packer.Ui - ui = env.Ui() + ui = c.Ui if cfgColor { ui = &packer.ColoredUi{ Color: colors[i%len(colors)], - Ui: env.Ui(), + Ui: ui, } } - buildUis[b.Name()] = ui - ui.Say(fmt.Sprintf("%s output will be in this color.", b.Name())) + buildUis[b] = ui + ui.Say(fmt.Sprintf("%s output will be in this color.", b)) } // Add a newline between the color output and the actual output - env.Ui().Say("") + c.Ui.Say("") log.Printf("Build debug mode: %v", cfgDebug) log.Printf("Force build: %v", cfgForce) @@ -125,7 +105,7 @@ func (c BuildCommand) Run(args []string) int { warnings, err := b.Prepare() if err != nil { - env.Ui().Error(err.Error()) + c.Ui.Error(err.Error()) return 1 } if len(warnings) > 0 { @@ -169,7 +149,7 @@ func (c BuildCommand) Run(args []string) int { name := b.Name() log.Printf("Starting build run: %s", name) ui := buildUis[name] - runArtifacts, err := b.Run(ui, env.Cache()) + runArtifacts, err := b.Run(ui, c.CoreConfig.Cache) if err != nil { ui.Error(fmt.Sprintf("Build '%s' errored: %s", name, err)) @@ -205,34 +185,34 @@ func (c BuildCommand) Run(args []string) int { interruptWg.Wait() if interrupted { - env.Ui().Say("Cleanly cancelled builds after being interrupted.") + c.Ui.Say("Cleanly cancelled builds after being interrupted.") return 1 } if len(errors) > 0 { - env.Ui().Machine("error-count", strconv.FormatInt(int64(len(errors)), 10)) + c.Ui.Machine("error-count", strconv.FormatInt(int64(len(errors)), 10)) - env.Ui().Error("\n==> Some builds didn't complete successfully and had errors:") + c.Ui.Error("\n==> Some builds didn't complete successfully and had errors:") for name, err := range errors { // Create a UI for the machine readable stuff to be targetted ui := &packer.TargettedUi{ Target: name, - Ui: env.Ui(), + Ui: c.Ui, } ui.Machine("error", err.Error()) - env.Ui().Error(fmt.Sprintf("--> %s: %s", name, err)) + c.Ui.Error(fmt.Sprintf("--> %s: %s", name, err)) } } if len(artifacts) > 0 { - env.Ui().Say("\n==> Builds finished. The artifacts of successful builds are:") + c.Ui.Say("\n==> Builds finished. 
The artifacts of successful builds are:") for name, buildArtifacts := range artifacts { // Create a UI for the machine readable stuff to be targetted ui := &packer.TargettedUi{ Target: name, - Ui: env.Ui(), + Ui: c.Ui, } // Machine-readable helpful @@ -267,11 +247,11 @@ func (c BuildCommand) Run(args []string) int { } ui.Machine("artifact", iStr, "end") - env.Ui().Say(message.String()) + c.Ui.Say(message.String()) } } } else { - env.Ui().Say("\n==> Builds finished but no artifacts were created.") + c.Ui.Say("\n==> Builds finished but no artifacts were created.") } if len(errors) > 0 { diff --git a/command/command_test.go b/command/command_test.go index 500ea7f9e..49e0c7276 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,20 +1,23 @@ package command import ( + "bytes" "path/filepath" "testing" - "github.com/mitchellh/cli" + "github.com/mitchellh/packer/packer" ) const fixturesDir = "./test-fixtures" func fatalCommand(t *testing.T, m Meta) { - ui := m.Ui.(*cli.MockUi) + ui := m.Ui.(*packer.BasicUi) + out := ui.Writer.(*bytes.Buffer) + err := ui.ErrorWriter.(*bytes.Buffer) t.Fatalf( "Bad exit code.\n\nStdout:\n\n%s\n\nStderr:\n\n%s", - ui.OutputWriter.String(), - ui.ErrorWriter.String()) + out.String(), + err.String()) } func testFixture(n string) string { @@ -22,7 +25,12 @@ func testFixture(n string) string { } func testMeta(t *testing.T) Meta { + var out, err bytes.Buffer + return Meta{ - Ui: new(cli.MockUi), + Ui: &packer.BasicUi{ + Writer: &out, + ErrorWriter: &err, + }, } } diff --git a/command/meta.go b/command/meta.go index 9c2f7f921..bb059da35 100644 --- a/command/meta.go +++ b/command/meta.go @@ -1,13 +1,152 @@ package command import ( - "github.com/mitchellh/cli" + "bufio" + "flag" + "fmt" + "io" + + "github.com/mitchellh/packer/helper/flag-kv" + "github.com/mitchellh/packer/helper/flag-slice" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" ) +// FlagSetFlags is an enum to define what flags are present in the +// default FlagSet returned by Meta.FlagSet +type FlagSetFlags uint + +const ( + FlagSetNone FlagSetFlags = 0 + FlagSetBuildFilter FlagSetFlags = 1 << iota + FlagSetVars +) + +// Meta contains the meta-options and functionality that nearly every +// Packer command inherits. type Meta struct { - EnvConfig *packer.EnvironmentConfig - Ui cli.Ui + CoreConfig *packer.CoreConfig + EnvConfig *packer.EnvironmentConfig + Ui packer.Ui + + // These are set by command-line flags + flagBuildExcept []string + flagBuildOnly []string + flagVars map[string]string + flagVarFiles []string +} + +// Core returns the core for the given template given the configured +// CoreConfig and user variables on this Meta. +func (m *Meta) Core(tpl *template.Template) (*packer.Core, error) { + // Copy the config so we don't modify it + config := *m.CoreConfig + config.Template = tpl + config.Variables = m.flagVars + + // Init the core + core, err := packer.NewCore(&config) + if err != nil { + return nil, fmt.Errorf("Error initializing core: %s", err) + } + + // Validate it + if err := core.Validate(); err != nil { + return nil, err + } + + return core, nil +} + +// BuildNames returns the list of builds that are in the given core +// that we care about taking into account the only and except flags. 
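+//
+// For example, with builds "a", "b" and "c" defined, "-only=a -only=b"
+// keeps just a and b, while "-except=c" yields the same pair from the
+// other direction. (Names illustrative; the flags repeat rather than
+// splitting on commas.)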
+func (m *Meta) BuildNames(c *packer.Core) []string { + // TODO: test + + // Filter the "only" + if len(m.flagBuildOnly) > 0 { + // Build a set of all the available names + nameSet := make(map[string]struct{}) + for _, n := range c.BuildNames() { + nameSet[n] = struct{}{} + } + + // Build our result set which we pre-allocate some sane number + result := make([]string, 0, len(m.flagBuildOnly)) + for _, n := range m.flagBuildOnly { + if _, ok := nameSet[n]; ok { + result = append(result, n) + } + } + + return result + } + + // Filter the "except" + if len(m.flagBuildExcept) > 0 { + // Build a set of the things we don't want + nameSet := make(map[string]struct{}) + for _, n := range m.flagBuildExcept { + nameSet[n] = struct{}{} + } + + // Build our result set which is the names of all builds except + // those in the given set. + names := c.BuildNames() + result := make([]string, 0, len(names)) + for _, n := range names { + if _, ok := nameSet[n]; !ok { + result = append(result, n) + } + } + return result + } + + // We care about everything + return c.BuildNames() +} + +// FlagSet returns a FlagSet with the common flags that every +// command implements. The exact behavior of FlagSet can be configured +// using the flags as the second parameter, for example to disable +// build settings on the commands that don't handle builds. +func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet { + f := flag.NewFlagSet(n, flag.ContinueOnError) + + // FlagSetBuildFilter tells us to enable the settings for selecting + // builds we care about. + if fs&FlagSetBuildFilter != 0 { + f.Var((*sliceflag.StringFlag)(&m.flagBuildExcept), "except", "") + f.Var((*sliceflag.StringFlag)(&m.flagBuildOnly), "only", "") + } + + // FlagSetVars tells us what variables to use + if fs&FlagSetVars != 0 { + f.Var((*kvflag.Flag)(&m.flagVars), "var", "") + f.Var((*sliceflag.StringFlag)(&m.flagVarFiles), "var-file", "") + } + + // Create an io.Writer that writes to our Ui properly for errors. + // This is kind of a hack, but it does the job. Basically: create + // a pipe, use a scanner to break it into lines, and output each line + // to the UI. Do this forever. + errR, errW := io.Pipe() + errScanner := bufio.NewScanner(errR) + go func() { + for errScanner.Scan() { + m.Ui.Error(errScanner.Text()) + } + }() + f.SetOutput(errW) + + return f +} + +// ValidateFlags should be called after parsing flags to validate the +// given flags +func (m *Meta) ValidateFlags() error { + // TODO + return nil } func (m *Meta) Environment() (packer.Environment, error) { diff --git a/command/push.go b/command/push.go index 74915de3f..ef0f42924 100644 --- a/command/push.go +++ b/command/push.go @@ -221,7 +221,7 @@ func (c *PushCommand) Run(args []string) int { return 1 } - c.Ui.Output(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name)) + c.Ui.Say(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name)) return 0 } diff --git a/command/version.go b/command/version.go index 689614e60..d9358b3a6 100644 --- a/command/version.go +++ b/command/version.go @@ -53,13 +53,13 @@ func (c *VersionCommand) Run(args []string) int { } } - c.Ui.Output(versionString.String()) + c.Ui.Say(versionString.String()) // If we have a version check function, then let's check for // the latest version as well. 
if c.CheckFunc != nil { // Separate the prior output with a newline - c.Ui.Output("") + c.Ui.Say("") // Check the latest version info, err := c.CheckFunc() @@ -68,7 +68,7 @@ func (c *VersionCommand) Run(args []string) int { "Error checking latest version: %s", err)) } if info.Outdated { - c.Ui.Output(fmt.Sprintf( + c.Ui.Say(fmt.Sprintf( "Your version of Packer is out of date! The latest version\n"+ "is %s. You can update by downloading from www.packer.io", info.Latest)) diff --git a/commands.go b/commands.go index 9c6458f64..24bdc2b04 100644 --- a/commands.go +++ b/commands.go @@ -27,8 +27,9 @@ func init() { } meta := command.Meta{ - EnvConfig: &EnvConfig, - Ui: Ui, + CoreConfig: &CoreConfig, + EnvConfig: &EnvConfig, + Ui: Ui, } Commands = map[string]cli.CommandFactory{ diff --git a/config.go b/config.go index 4acb3c3b1..34cfdcb40 100644 --- a/config.go +++ b/config.go @@ -13,6 +13,9 @@ import ( "github.com/mitchellh/packer/packer/plugin" ) +// CoreConfig is the global CoreConfig we use to initialize the CLI. +var CoreConfig packer.CoreConfig + // EnvConfig is the global EnvironmentConfig we use to initialize the CLI. var EnvConfig packer.EnvironmentConfig diff --git a/helper/flag-kv/flag.go b/helper/flag-kv/flag.go new file mode 100644 index 000000000..0bf4b0086 --- /dev/null +++ b/helper/flag-kv/flag.go @@ -0,0 +1,29 @@ +package kvflag + +import ( + "fmt" + "strings" +) + +// Flag is a flag.Value implementation for parsing user variables +// from the command-line in the format of '-var key=value'. +type Flag map[string]string + +func (v *Flag) String() string { + return "" +} + +func (v *Flag) Set(raw string) error { + idx := strings.Index(raw, "=") + if idx == -1 { + return fmt.Errorf("No '=' value in arg: %s", raw) + } + + if *v == nil { + *v = make(map[string]string) + } + + key, value := raw[0:idx], raw[idx+1:] + (*v)[key] = value + return nil +} diff --git a/helper/flag-kv/flag_test.go b/helper/flag-kv/flag_test.go new file mode 100644 index 000000000..9f81d5192 --- /dev/null +++ b/helper/flag-kv/flag_test.go @@ -0,0 +1,56 @@ +package kvflag + +import ( + "flag" + "reflect" + "testing" +) + +func TestFlag_impl(t *testing.T) { + var _ flag.Value = new(Flag) +} + +func TestFlag(t *testing.T) { + cases := []struct { + Input string + Output map[string]string + Error bool + }{ + { + "key=value", + map[string]string{"key": "value"}, + false, + }, + + { + "key=", + map[string]string{"key": ""}, + false, + }, + + { + "key=foo=bar", + map[string]string{"key": "foo=bar"}, + false, + }, + + { + "key", + nil, + true, + }, + } + + for _, tc := range cases { + f := new(Flag) + err := f.Set(tc.Input) + if (err != nil) != tc.Error { + t.Fatalf("bad error. Input: %#v", tc.Input) + } + + actual := map[string]string(*f) + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("bad: %#v", actual) + } + } +} diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go new file mode 100644 index 000000000..da75149dc --- /dev/null +++ b/helper/flag-slice/flag.go @@ -0,0 +1,16 @@ +package sliceflag + +import "strings" + +// StringFlag implements the flag.Value interface and allows multiple +// calls to the same variable to append a list. 
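+//
+// For example, "-only=foo -only=bar" accumulates into
+// []string{"foo", "bar"}, as exercised by the test below.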
+type StringFlag []string + +func (s *StringFlag) String() string { + return strings.Join(*s, ",") +} + +func (s *StringFlag) Set(value string) error { + *s = append(*s, value) + return nil +} diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go new file mode 100644 index 000000000..f72e1d960 --- /dev/null +++ b/helper/flag-slice/flag_test.go @@ -0,0 +1,33 @@ +package sliceflag + +import ( + "flag" + "reflect" + "testing" +) + +func TestStringFlag_implements(t *testing.T) { + var raw interface{} + raw = new(StringFlag) + if _, ok := raw.(flag.Value); !ok { + t.Fatalf("StringFlag should be a Value") + } +} + +func TestStringFlagSet(t *testing.T) { + sv := new(StringFlag) + err := sv.Set("foo") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = sv.Set("bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"foo", "bar"} + if !reflect.DeepEqual([]string(*sv), expected) { + t.Fatalf("Bad: %#v", sv) + } +} diff --git a/main.go b/main.go index 2bebafb9e..8616a8e2d 100644 --- a/main.go +++ b/main.go @@ -159,6 +159,13 @@ func wrappedMain() int { } } + // Create the core configuration + CoreConfig = packer.CoreConfig{ + Cache: EnvConfig.Cache, + Components: EnvConfig.Components, + Ui: EnvConfig.Ui, + } + //setupSignalHandlers(env) cli := &cli.CLI{ From ba359394b11059ed9af60a974eca1f210320e4af Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 23 May 2015 16:32:36 -0700 Subject: [PATCH 095/956] fix compilation --- commands.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/commands.go b/commands.go index 24bdc2b04..d3a458f16 100644 --- a/commands.go +++ b/commands.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/cli" "github.com/mitchellh/packer/command" + "github.com/mitchellh/packer/packer" ) // Commands is the mapping of all the available Terraform commands. 
@@ -18,18 +19,14 @@ const ErrorPrefix = "e:" const OutputPrefix = "o:" func init() { - Ui = &cli.PrefixedUi{ - AskPrefix: OutputPrefix, - OutputPrefix: OutputPrefix, - InfoPrefix: OutputPrefix, - ErrorPrefix: ErrorPrefix, - Ui: &cli.BasicUi{Writer: os.Stdout}, - } - meta := command.Meta{ CoreConfig: &CoreConfig, EnvConfig: &EnvConfig, - Ui: Ui, + Ui: &packer.BasicUi{ + Reader: os.Stdin, + Writer: os.Stdout, + ErrorWriter: os.Stdout, + }, } Commands = map[string]cli.CommandFactory{ From dc74ec56127e64640906ab850cd2b99dfd49fc16 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 May 2015 17:29:10 -0700 Subject: [PATCH 096/956] packer: remove Environment --- command/fix.go | 29 ++- command/inspect.go | 17 +- command/meta.go | 5 - command/validate.go | 100 +++++------ command/version.go | 12 +- commands.go | 1 - config.go | 3 - main.go | 20 +-- packer/core.go | 22 +++ packer/core_test.go | 11 ++ packer/environment.go | 183 ------------------- packer/environment_test.go | 310 --------------------------------- packer/rpc/client.go | 7 - packer/rpc/environment.go | 178 ------------------- packer/rpc/environment_test.go | 124 ------------- packer/rpc/server.go | 8 - signal.go | 6 +- 17 files changed, 102 insertions(+), 934 deletions(-) delete mode 100644 packer/environment.go delete mode 100644 packer/environment_test.go delete mode 100644 packer/rpc/environment.go delete mode 100644 packer/rpc/environment_test.go diff --git a/command/fix.go b/command/fix.go index aac0b3916..e908dc52e 100644 --- a/command/fix.go +++ b/command/fix.go @@ -3,7 +3,6 @@ package command import ( "bytes" "encoding/json" - "flag" "fmt" "log" "os" @@ -17,28 +16,22 @@ type FixCommand struct { } func (c *FixCommand) Run(args []string) int { - env, err := c.Meta.Environment() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing environment: %s", err)) + flags := c.Meta.FlagSet("fix", FlagSetNone) + flags.Usage = func() { c.Ui.Say(c.Help()) } + if err := flags.Parse(args); err != nil { return 1 } - cmdFlags := flag.NewFlagSet("fix", flag.ContinueOnError) - cmdFlags.Usage = func() { env.Ui().Say(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() + args = flags.Args() if len(args) != 1 { - cmdFlags.Usage() + flags.Usage() return 1 } // Read the file for decoding tplF, err := os.Open(args[0]) if err != nil { - env.Ui().Error(fmt.Sprintf("Error opening template: %s", err)) + c.Ui.Error(fmt.Sprintf("Error opening template: %s", err)) return 1 } defer tplF.Close() @@ -47,7 +40,7 @@ func (c *FixCommand) Run(args []string) int { var templateData map[string]interface{} decoder := json.NewDecoder(tplF) if err := decoder.Decode(&templateData); err != nil { - env.Ui().Error(fmt.Sprintf("Error parsing template: %s", err)) + c.Ui.Error(fmt.Sprintf("Error parsing template: %s", err)) return 1 } @@ -65,7 +58,7 @@ func (c *FixCommand) Run(args []string) int { log.Printf("Running fixer: %s", name) input, err = fixer.Fix(input) if err != nil { - env.Ui().Error(fmt.Sprintf("Error fixing: %s", err)) + c.Ui.Error(fmt.Sprintf("Error fixing: %s", err)) return 1 } } @@ -73,20 +66,20 @@ func (c *FixCommand) Run(args []string) int { var output bytes.Buffer encoder := json.NewEncoder(&output) if err := encoder.Encode(input); err != nil { - env.Ui().Error(fmt.Sprintf("Error encoding: %s", err)) + c.Ui.Error(fmt.Sprintf("Error encoding: %s", err)) return 1 } var indented bytes.Buffer if err := json.Indent(&indented, output.Bytes(), "", " "); err != nil { - 
env.Ui().Error(fmt.Sprintf("Error encoding: %s", err)) + c.Ui.Error(fmt.Sprintf("Error encoding: %s", err)) return 1 } result := indented.String() result = strings.Replace(result, `\u003c`, "<", -1) result = strings.Replace(result, `\u003e`, ">", -1) - env.Ui().Say(result) + c.Ui.Say(result) return 0 } diff --git a/command/inspect.go b/command/inspect.go index 8a9fd9569..2574615fb 100644 --- a/command/inspect.go +++ b/command/inspect.go @@ -1,7 +1,6 @@ package command import ( - "flag" "fmt" "github.com/mitchellh/packer/packer" "log" @@ -9,19 +8,13 @@ import ( "strings" ) -type InspectCommand struct{ +type InspectCommand struct { Meta } func (c *InspectCommand) Run(args []string) int { - env, err := c.Meta.Environment() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing environment: %s", err)) - return 1 - } - - flags := flag.NewFlagSet("inspect", flag.ContinueOnError) - flags.Usage = func() { env.Ui().Say(c.Help()) } + flags := c.Meta.FlagSet("build", FlagSetNone) + flags.Usage = func() { c.Ui.Say(c.Help()) } if err := flags.Parse(args); err != nil { return 1 } @@ -36,12 +29,12 @@ func (c *InspectCommand) Run(args []string) int { log.Printf("Reading template: %#v", args[0]) tpl, err := packer.ParseTemplateFile(args[0], nil) if err != nil { - env.Ui().Error(fmt.Sprintf("Failed to parse template: %s", err)) + c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } // Convenience... - ui := env.Ui() + ui := c.Ui // Description if tpl.Description != "" { diff --git a/command/meta.go b/command/meta.go index bb059da35..e62577df9 100644 --- a/command/meta.go +++ b/command/meta.go @@ -26,7 +26,6 @@ const ( // Packer command inherits. type Meta struct { CoreConfig *packer.CoreConfig - EnvConfig *packer.EnvironmentConfig Ui packer.Ui // These are set by command-line flags @@ -148,7 +147,3 @@ func (m *Meta) ValidateFlags() error { // TODO return nil } - -func (m *Meta) Environment() (packer.Environment, error) { - return packer.NewEnvironment(m.EnvConfig) -} diff --git a/command/validate.go b/command/validate.go index a63019a9c..5d7e16c5d 100644 --- a/command/validate.go +++ b/command/validate.go @@ -1,12 +1,12 @@ package command import ( - "flag" "fmt" - cmdcommon "github.com/mitchellh/packer/common/command" - "github.com/mitchellh/packer/packer" "log" "strings" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" ) type ValidateCommand struct { @@ -15,72 +15,54 @@ type ValidateCommand struct { func (c *ValidateCommand) Run(args []string) int { var cfgSyntaxOnly bool - buildOptions := new(cmdcommon.BuildOptions) - - env, err := c.Meta.Environment() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing environment: %s", err)) + flags := c.Meta.FlagSet("validate", FlagSetBuildFilter|FlagSetVars) + flags.Usage = func() { c.Ui.Say(c.Help()) } + flags.BoolVar(&cfgSyntaxOnly, "syntax-only", false, "check syntax only") + if err := flags.Parse(args); err != nil { return 1 } - cmdFlags := flag.NewFlagSet("validate", flag.ContinueOnError) - cmdFlags.Usage = func() { env.Ui().Say(c.Help()) } - cmdFlags.BoolVar(&cfgSyntaxOnly, "syntax-only", false, "check syntax only") - cmdcommon.BuildOptionFlags(cmdFlags, buildOptions) - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - args = cmdFlags.Args() + args = flags.Args() if len(args) != 1 { - cmdFlags.Usage() + flags.Usage() return 1 } - if err := buildOptions.Validate(); err != nil { - env.Ui().Error(err.Error()) - env.Ui().Error("") - env.Ui().Error(c.Help()) - return 1 - } - - 
userVars, err := buildOptions.AllUserVars() + // Parse the template + tpl, err := template.ParseFile(args[0]) if err != nil { - env.Ui().Error(fmt.Sprintf("Error compiling user variables: %s", err)) - env.Ui().Error("") - env.Ui().Error(c.Help()) - return 1 - } - - // Parse the template into a machine-usable format - log.Printf("Reading template: %s", args[0]) - tpl, err := packer.ParseTemplateFile(args[0], userVars) - if err != nil { - env.Ui().Error(fmt.Sprintf("Failed to parse template: %s", err)) + c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } + // If we're only checking syntax, then we're done already if cfgSyntaxOnly { - env.Ui().Say("Syntax-only check passed. Everything looks okay.") + c.Ui.Say("Syntax-only check passed. Everything looks okay.") return 0 } + // Get the core + core, err := c.Meta.Core(tpl) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + errs := make([]error, 0) warnings := make(map[string][]string) - // The component finder for our builds - components := &packer.ComponentFinder{ - Builder: env.Builder, - Hook: env.Hook, - PostProcessor: env.PostProcessor, - Provisioner: env.Provisioner, - } + // Get the builds we care about + buildNames := c.Meta.BuildNames(core) + builds := make([]packer.Build, 0, len(buildNames)) + for _, n := range buildNames { + b, err := core.Build(n) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "Failed to initialize build '%s': %s", + n, err)) + } - // Otherwise, get all the builds - builds, err := buildOptions.Builds(tpl, components) - if err != nil { - env.Ui().Error(err.Error()) - return 1 + builds = append(builds, b) } // Check the configuration of all builds @@ -96,12 +78,12 @@ func (c *ValidateCommand) Run(args []string) int { } if len(errs) > 0 { - env.Ui().Error("Template validation failed. Errors are shown below.\n") + c.Ui.Error("Template validation failed. 
Errors are shown below.\n") for i, err := range errs { - env.Ui().Error(err.Error()) + c.Ui.Error(err.Error()) if (i + 1) < len(errs) { - env.Ui().Error("") + c.Ui.Error("") } } @@ -109,21 +91,21 @@ func (c *ValidateCommand) Run(args []string) int { } if len(warnings) > 0 { - env.Ui().Say("Template validation succeeded, but there were some warnings.") - env.Ui().Say("These are ONLY WARNINGS, and Packer will attempt to build the") - env.Ui().Say("template despite them, but they should be paid attention to.\n") + c.Ui.Say("Template validation succeeded, but there were some warnings.") + c.Ui.Say("These are ONLY WARNINGS, and Packer will attempt to build the") + c.Ui.Say("template despite them, but they should be paid attention to.\n") for build, warns := range warnings { - env.Ui().Say(fmt.Sprintf("Warnings for build '%s':\n", build)) + c.Ui.Say(fmt.Sprintf("Warnings for build '%s':\n", build)) for _, warning := range warns { - env.Ui().Say(fmt.Sprintf("* %s", warning)) + c.Ui.Say(fmt.Sprintf("* %s", warning)) } } return 0 } - env.Ui().Say("Template validated successfully.") + c.Ui.Say("Template validated successfully.") return 0 } diff --git a/command/version.go b/command/version.go index d9358b3a6..cd170f2df 100644 --- a/command/version.go +++ b/command/version.go @@ -33,15 +33,9 @@ func (c *VersionCommand) Help() string { } func (c *VersionCommand) Run(args []string) int { - env, err := c.Meta.Environment() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing environment: %s", err)) - return 1 - } - - env.Ui().Machine("version", c.Version) - env.Ui().Machine("version-prelease", c.VersionPrerelease) - env.Ui().Machine("version-commit", c.Revision) + c.Ui.Machine("version", c.Version) + c.Ui.Machine("version-prelease", c.VersionPrerelease) + c.Ui.Machine("version-commit", c.Revision) var versionString bytes.Buffer fmt.Fprintf(&versionString, "Packer v%s", c.Version) diff --git a/commands.go b/commands.go index d3a458f16..e0f313957 100644 --- a/commands.go +++ b/commands.go @@ -21,7 +21,6 @@ const OutputPrefix = "o:" func init() { meta := command.Meta{ CoreConfig: &CoreConfig, - EnvConfig: &EnvConfig, Ui: &packer.BasicUi{ Reader: os.Stdin, Writer: os.Stdout, diff --git a/config.go b/config.go index 34cfdcb40..a9c07043f 100644 --- a/config.go +++ b/config.go @@ -16,9 +16,6 @@ import ( // CoreConfig is the global CoreConfig we use to initialize the CLI. var CoreConfig packer.CoreConfig -// EnvConfig is the global EnvironmentConfig we use to initialize the CLI. 
-var EnvConfig packer.EnvironmentConfig - type config struct { DisableCheckpoint bool `json:"disable_checkpoint"` DisableCheckpointSignature bool `json:"disable_checkpoint_signature"` diff --git a/main.go b/main.go index 8616a8e2d..73d4b88cf 100644 --- a/main.go +++ b/main.go @@ -140,14 +140,13 @@ func wrappedMain() int { defer plugin.CleanupClients() // Create the environment configuration - EnvConfig = *packer.DefaultEnvironmentConfig() - EnvConfig.Cache = cache - EnvConfig.Components.Builder = config.LoadBuilder - EnvConfig.Components.Hook = config.LoadHook - EnvConfig.Components.PostProcessor = config.LoadPostProcessor - EnvConfig.Components.Provisioner = config.LoadProvisioner + CoreConfig.Cache = cache + CoreConfig.Components.Builder = config.LoadBuilder + CoreConfig.Components.Hook = config.LoadHook + CoreConfig.Components.PostProcessor = config.LoadPostProcessor + CoreConfig.Components.Provisioner = config.LoadProvisioner if machineReadable { - EnvConfig.Ui = &packer.MachineReadableUi{ + CoreConfig.Ui = &packer.MachineReadableUi{ Writer: os.Stdout, } @@ -159,13 +158,6 @@ func wrappedMain() int { } } - // Create the core configuration - CoreConfig = packer.CoreConfig{ - Cache: EnvConfig.Cache, - Components: EnvConfig.Components, - Ui: EnvConfig.Ui, - } - //setupSignalHandlers(env) cli := &cli.CLI{ diff --git a/packer/core.go b/packer/core.go index 21ea4b8f9..4c4292ca7 100644 --- a/packer/core.go +++ b/packer/core.go @@ -31,6 +31,28 @@ type CoreConfig struct { Variables map[string]string } +// The function type used to lookup Builder implementations. +type BuilderFunc func(name string) (Builder, error) + +// The function type used to lookup Hook implementations. +type HookFunc func(name string) (Hook, error) + +// The function type used to lookup PostProcessor implementations. +type PostProcessorFunc func(name string) (PostProcessor, error) + +// The function type used to lookup Provisioner implementations. +type ProvisionerFunc func(name string) (Provisioner, error) + +// ComponentFinder is a struct that contains the various function +// pointers necessary to look up components of Packer such as builders, +// commands, etc. +type ComponentFinder struct { + Builder BuilderFunc + Hook HookFunc + PostProcessor PostProcessorFunc + Provisioner ProvisionerFunc +} + // NewCore creates a new Core. func NewCore(c *CoreConfig) (*Core, error) { if c.Ui == nil { diff --git a/packer/core_test.go b/packer/core_test.go index d3f338d12..d66a7786e 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -99,3 +99,14 @@ func TestCoreValidate(t *testing.T) { } } } + +func testComponentFinder() *ComponentFinder { + builderFactory := func(n string) (Builder, error) { return new(MockBuilder), nil } + ppFactory := func(n string) (PostProcessor, error) { return new(TestPostProcessor), nil } + provFactory := func(n string) (Provisioner, error) { return new(MockProvisioner), nil } + return &ComponentFinder{ + Builder: builderFactory, + PostProcessor: ppFactory, + Provisioner: provFactory, + } +} diff --git a/packer/environment.go b/packer/environment.go deleted file mode 100644 index 58585ffcf..000000000 --- a/packer/environment.go +++ /dev/null @@ -1,183 +0,0 @@ -// The packer package contains the core components of Packer. -package packer - -import ( - "errors" - "fmt" - "os" -) - -// The function type used to lookup Builder implementations. -type BuilderFunc func(name string) (Builder, error) - -// The function type used to lookup Hook implementations. 
-type HookFunc func(name string) (Hook, error) - -// The function type used to lookup PostProcessor implementations. -type PostProcessorFunc func(name string) (PostProcessor, error) - -// The function type used to lookup Provisioner implementations. -type ProvisionerFunc func(name string) (Provisioner, error) - -// ComponentFinder is a struct that contains the various function -// pointers necessary to look up components of Packer such as builders, -// commands, etc. -type ComponentFinder struct { - Builder BuilderFunc - Hook HookFunc - PostProcessor PostProcessorFunc - Provisioner ProvisionerFunc -} - -// The environment interface provides access to the configuration and -// state of a single Packer run. -// -// It allows for things such as executing CLI commands, getting the -// list of available builders, and more. -type Environment interface { - Builder(string) (Builder, error) - Cache() Cache - Hook(string) (Hook, error) - PostProcessor(string) (PostProcessor, error) - Provisioner(string) (Provisioner, error) - Ui() Ui -} - -// An implementation of an Environment that represents the Packer core -// environment. -type coreEnvironment struct { - cache Cache - components ComponentFinder - ui Ui -} - -// This struct configures new environments. -type EnvironmentConfig struct { - Cache Cache - Components ComponentFinder - Ui Ui -} - -// DefaultEnvironmentConfig returns a default EnvironmentConfig that can -// be used to create a new enviroment with NewEnvironment with sane defaults. -func DefaultEnvironmentConfig() *EnvironmentConfig { - config := &EnvironmentConfig{} - config.Ui = &BasicUi{ - Reader: os.Stdin, - Writer: os.Stdout, - ErrorWriter: os.Stdout, - } - - return config -} - -// This creates a new environment -func NewEnvironment(config *EnvironmentConfig) (resultEnv Environment, err error) { - if config == nil { - err = errors.New("config must be given to initialize environment") - return - } - - env := &coreEnvironment{} - env.cache = config.Cache - env.components = config.Components - env.ui = config.Ui - - // We want to make sure the components have valid function pointers. - // If a function pointer was not given, we assume that the function - // will just return a nil component. - if env.components.Builder == nil { - env.components.Builder = func(string) (Builder, error) { return nil, nil } - } - - if env.components.Hook == nil { - env.components.Hook = func(string) (Hook, error) { return nil, nil } - } - - if env.components.PostProcessor == nil { - env.components.PostProcessor = func(string) (PostProcessor, error) { return nil, nil } - } - - if env.components.Provisioner == nil { - env.components.Provisioner = func(string) (Provisioner, error) { return nil, nil } - } - - // The default cache is just the system temporary directory - if env.cache == nil { - env.cache = &FileCache{CacheDir: os.TempDir()} - } - - resultEnv = env - return -} - -// Returns a builder of the given name that is registered with this -// environment. -func (e *coreEnvironment) Builder(name string) (b Builder, err error) { - b, err = e.components.Builder(name) - if err != nil { - return - } - - if b == nil { - err = fmt.Errorf("No builder returned for name: %s", name) - } - - return -} - -// Returns the cache for this environment -func (e *coreEnvironment) Cache() Cache { - return e.cache -} - -// Returns a hook of the given name that is registered with this -// environment. 
-func (e *coreEnvironment) Hook(name string) (h Hook, err error) { - h, err = e.components.Hook(name) - if err != nil { - return - } - - if h == nil { - err = fmt.Errorf("No hook returned for name: %s", name) - } - - return -} - -// Returns a PostProcessor for the given name that is registered with this -// environment. -func (e *coreEnvironment) PostProcessor(name string) (p PostProcessor, err error) { - p, err = e.components.PostProcessor(name) - if err != nil { - return - } - - if p == nil { - err = fmt.Errorf("No post processor found for name: %s", name) - } - - return -} - -// Returns a provisioner for the given name that is registered with this -// environment. -func (e *coreEnvironment) Provisioner(name string) (p Provisioner, err error) { - p, err = e.components.Provisioner(name) - if err != nil { - return - } - - if p == nil { - err = fmt.Errorf("No provisioner returned for name: %s", name) - } - - return -} - -// Returns the UI for the environment. The UI is the interface that should -// be used for all communication with the outside world. -func (e *coreEnvironment) Ui() Ui { - return e.ui -} diff --git a/packer/environment_test.go b/packer/environment_test.go deleted file mode 100644 index 80edab58e..000000000 --- a/packer/environment_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package packer - -import ( - "bytes" - "errors" - "io/ioutil" - "log" - "os" - "testing" -) - -func init() { - // Disable log output for tests - log.SetOutput(ioutil.Discard) -} - -func testComponentFinder() *ComponentFinder { - builderFactory := func(n string) (Builder, error) { return new(MockBuilder), nil } - ppFactory := func(n string) (PostProcessor, error) { return new(TestPostProcessor), nil } - provFactory := func(n string) (Provisioner, error) { return new(MockProvisioner), nil } - return &ComponentFinder{ - Builder: builderFactory, - PostProcessor: ppFactory, - Provisioner: provFactory, - } -} - -func testEnvironment() Environment { - config := DefaultEnvironmentConfig() - config.Ui = &BasicUi{ - Reader: new(bytes.Buffer), - Writer: new(bytes.Buffer), - ErrorWriter: new(bytes.Buffer), - } - - env, err := NewEnvironment(config) - if err != nil { - panic(err) - } - - return env -} - -func TestEnvironment_DefaultConfig_Ui(t *testing.T) { - config := DefaultEnvironmentConfig() - if config.Ui == nil { - t.Fatal("config.Ui should not be nil") - } - - rwUi, ok := config.Ui.(*BasicUi) - if !ok { - t.Fatal("default UI should be BasicUi") - } - if rwUi.Writer != os.Stdout { - t.Fatal("default UI should go to stdout") - } - if rwUi.Reader != os.Stdin { - t.Fatal("default UI reader should go to stdin") - } -} - -func TestNewEnvironment_NoConfig(t *testing.T) { - env, err := NewEnvironment(nil) - if env != nil { - t.Fatal("env should be nil") - } - if err == nil { - t.Fatal("should have error") - } -} - -func TestEnvironment_NilComponents(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components = *new(ComponentFinder) - - env, err := NewEnvironment(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - // All of these should not cause panics... so we don't assert - // anything but if there is a panic in the test then yeah, something - // went wrong. 
- env.Builder("foo") - env.Hook("foo") - env.PostProcessor("foo") - env.Provisioner("foo") -} - -func TestEnvironment_Builder(t *testing.T) { - builder := &MockBuilder{} - builders := make(map[string]Builder) - builders["foo"] = builder - - config := DefaultEnvironmentConfig() - config.Components.Builder = func(n string) (Builder, error) { return builders[n], nil } - - env, _ := NewEnvironment(config) - returnedBuilder, err := env.Builder("foo") - if err != nil { - t.Fatalf("err: %s", err) - } - if returnedBuilder != builder { - t.Fatalf("bad: %#v", returnedBuilder) - } -} - -func TestEnvironment_Builder_NilError(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Builder = func(n string) (Builder, error) { return nil, nil } - - env, _ := NewEnvironment(config) - returnedBuilder, err := env.Builder("foo") - if err == nil { - t.Fatal("should have error") - } - if returnedBuilder != nil { - t.Fatalf("bad: %#v", returnedBuilder) - } -} - -func TestEnvironment_Builder_Error(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Builder = func(n string) (Builder, error) { return nil, errors.New("foo") } - - env, _ := NewEnvironment(config) - returnedBuilder, err := env.Builder("foo") - if err == nil { - t.Fatal("should have error") - } - if err.Error() != "foo" { - t.Fatalf("bad err: %s", err) - } - if returnedBuilder != nil { - t.Fatalf("should be nil: %#v", returnedBuilder) - } -} - -func TestEnvironment_Cache(t *testing.T) { - config := DefaultEnvironmentConfig() - env, _ := NewEnvironment(config) - if env.Cache() == nil { - t.Fatal("cache should not be nil") - } -} - -func TestEnvironment_Hook(t *testing.T) { - hook := &MockHook{} - hooks := make(map[string]Hook) - hooks["foo"] = hook - - config := DefaultEnvironmentConfig() - config.Components.Hook = func(n string) (Hook, error) { return hooks[n], nil } - - env, _ := NewEnvironment(config) - returned, err := env.Hook("foo") - if err != nil { - t.Fatalf("err: %s", err) - } - if returned != hook { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_Hook_NilError(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Hook = func(n string) (Hook, error) { return nil, nil } - - env, _ := NewEnvironment(config) - returned, err := env.Hook("foo") - if err == nil { - t.Fatal("should have error") - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_Hook_Error(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Hook = func(n string) (Hook, error) { return nil, errors.New("foo") } - - env, _ := NewEnvironment(config) - returned, err := env.Hook("foo") - if err == nil { - t.Fatal("should have error") - } - if err.Error() != "foo" { - t.Fatalf("err: %s", err) - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_PostProcessor(t *testing.T) { - pp := &TestPostProcessor{} - pps := make(map[string]PostProcessor) - pps["foo"] = pp - - config := DefaultEnvironmentConfig() - config.Components.PostProcessor = func(n string) (PostProcessor, error) { return pps[n], nil } - - env, _ := NewEnvironment(config) - returned, err := env.PostProcessor("foo") - if err != nil { - t.Fatalf("err: %s", err) - } - if returned != pp { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_PostProcessor_NilError(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.PostProcessor = func(n string) (PostProcessor, error) { return nil, nil } - - env, _ := NewEnvironment(config) - 
returned, err := env.PostProcessor("foo") - if err == nil { - t.Fatal("should have error") - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_PostProcessor_Error(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.PostProcessor = func(n string) (PostProcessor, error) { return nil, errors.New("foo") } - - env, _ := NewEnvironment(config) - returned, err := env.PostProcessor("foo") - if err == nil { - t.Fatal("should be an error") - } - if err.Error() != "foo" { - t.Fatalf("bad err: %s", err) - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironmentProvisioner(t *testing.T) { - p := &MockProvisioner{} - ps := make(map[string]Provisioner) - ps["foo"] = p - - config := DefaultEnvironmentConfig() - config.Components.Provisioner = func(n string) (Provisioner, error) { return ps[n], nil } - - env, _ := NewEnvironment(config) - returned, err := env.Provisioner("foo") - if err != nil { - t.Fatalf("err: %s", err) - } - if returned != p { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironmentProvisioner_NilError(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Provisioner = func(n string) (Provisioner, error) { return nil, nil } - - env, _ := NewEnvironment(config) - returned, err := env.Provisioner("foo") - if err == nil { - t.Fatal("should have error") - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironmentProvisioner_Error(t *testing.T) { - config := DefaultEnvironmentConfig() - config.Components.Provisioner = func(n string) (Provisioner, error) { - return nil, errors.New("foo") - } - - env, _ := NewEnvironment(config) - returned, err := env.Provisioner("foo") - if err == nil { - t.Fatal("should have error") - } - if err.Error() != "foo" { - t.Fatalf("err: %s", err) - } - if returned != nil { - t.Fatalf("bad: %#v", returned) - } -} - -func TestEnvironment_SettingUi(t *testing.T) { - ui := &BasicUi{ - Reader: new(bytes.Buffer), - Writer: new(bytes.Buffer), - } - - config := &EnvironmentConfig{} - config.Ui = ui - - env, _ := NewEnvironment(config) - - if env.Ui() != ui { - t.Fatalf("UI should be equal: %#v", env.Ui()) - } -} diff --git a/packer/rpc/client.go b/packer/rpc/client.go index 0e0140028..2f682f47a 100644 --- a/packer/rpc/client.go +++ b/packer/rpc/client.go @@ -100,13 +100,6 @@ func (c *Client) Communicator() packer.Communicator { } } -func (c *Client) Environment() packer.Environment { - return &Environment{ - client: c.client, - mux: c.mux, - } -} - func (c *Client) Hook() packer.Hook { return &hook{ client: c.client, diff --git a/packer/rpc/environment.go b/packer/rpc/environment.go deleted file mode 100644 index 4e2b73da8..000000000 --- a/packer/rpc/environment.go +++ /dev/null @@ -1,178 +0,0 @@ -package rpc - -import ( - "github.com/mitchellh/packer/packer" - "log" - "net/rpc" -) - -// A Environment is an implementation of the packer.Environment interface -// where the actual environment is executed over an RPC connection. -type Environment struct { - client *rpc.Client - mux *muxBroker -} - -// A EnvironmentServer wraps a packer.Environment and makes it exportable -// as part of a Golang RPC server. 
-type EnvironmentServer struct { - env packer.Environment - mux *muxBroker -} - -func (e *Environment) Builder(name string) (b packer.Builder, err error) { - var streamId uint32 - err = e.client.Call("Environment.Builder", name, &streamId) - if err != nil { - return - } - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - return nil, err - } - b = client.Builder() - return -} - -func (e *Environment) Cache() packer.Cache { - var streamId uint32 - if err := e.client.Call("Environment.Cache", new(interface{}), &streamId); err != nil { - panic(err) - } - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - log.Printf("[ERR] Error getting cache client: %s", err) - return nil - } - return client.Cache() -} - -func (e *Environment) Hook(name string) (h packer.Hook, err error) { - var streamId uint32 - err = e.client.Call("Environment.Hook", name, &streamId) - if err != nil { - return - } - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - return nil, err - } - return client.Hook(), nil -} - -func (e *Environment) PostProcessor(name string) (p packer.PostProcessor, err error) { - var streamId uint32 - err = e.client.Call("Environment.PostProcessor", name, &streamId) - if err != nil { - return - } - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - return nil, err - } - p = client.PostProcessor() - return -} - -func (e *Environment) Provisioner(name string) (p packer.Provisioner, err error) { - var streamId uint32 - err = e.client.Call("Environment.Provisioner", name, &streamId) - if err != nil { - return - } - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - return nil, err - } - p = client.Provisioner() - return -} - -func (e *Environment) Ui() packer.Ui { - var streamId uint32 - e.client.Call("Environment.Ui", new(interface{}), &streamId) - - client, err := newClientWithMux(e.mux, streamId) - if err != nil { - log.Printf("[ERR] Error connecting to Ui: %s", err) - return nil - } - return client.Ui() -} - -func (e *EnvironmentServer) Builder(name string, reply *uint32) error { - builder, err := e.env.Builder(name) - if err != nil { - return NewBasicError(err) - } - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterBuilder(builder) - go server.Serve() - return nil -} - -func (e *EnvironmentServer) Cache(args *interface{}, reply *uint32) error { - cache := e.env.Cache() - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterCache(cache) - go server.Serve() - return nil -} - -func (e *EnvironmentServer) Hook(name string, reply *uint32) error { - hook, err := e.env.Hook(name) - if err != nil { - return NewBasicError(err) - } - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterHook(hook) - go server.Serve() - return nil -} - -func (e *EnvironmentServer) PostProcessor(name string, reply *uint32) error { - pp, err := e.env.PostProcessor(name) - if err != nil { - return NewBasicError(err) - } - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterPostProcessor(pp) - go server.Serve() - return nil -} - -func (e *EnvironmentServer) Provisioner(name string, reply *uint32) error { - prov, err := e.env.Provisioner(name) - if err != nil { - return NewBasicError(err) - } - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterProvisioner(prov) - go server.Serve() - return nil -} - -func (e *EnvironmentServer) Ui(args *interface{}, reply 
*uint32) error { - ui := e.env.Ui() - - *reply = e.mux.NextId() - server := newServerWithMux(e.mux, *reply) - server.RegisterUi(ui) - go server.Serve() - return nil -} diff --git a/packer/rpc/environment_test.go b/packer/rpc/environment_test.go deleted file mode 100644 index a5085d0ef..000000000 --- a/packer/rpc/environment_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package rpc - -import ( - "github.com/mitchellh/packer/packer" - "testing" -) - -var testEnvBuilder = &packer.MockBuilder{} -var testEnvCache = &testCache{} -var testEnvUi = &testUi{} - -type testEnvironment struct { - builderCalled bool - builderName string - cliCalled bool - cliArgs []string - hookCalled bool - hookName string - ppCalled bool - ppName string - provCalled bool - provName string - uiCalled bool -} - -func (e *testEnvironment) Builder(name string) (packer.Builder, error) { - e.builderCalled = true - e.builderName = name - return testEnvBuilder, nil -} - -func (e *testEnvironment) Cache() packer.Cache { - return testEnvCache -} - -func (e *testEnvironment) Cli(args []string) (int, error) { - e.cliCalled = true - e.cliArgs = args - return 42, nil -} - -func (e *testEnvironment) Hook(name string) (packer.Hook, error) { - e.hookCalled = true - e.hookName = name - return nil, nil -} - -func (e *testEnvironment) PostProcessor(name string) (packer.PostProcessor, error) { - e.ppCalled = true - e.ppName = name - return nil, nil -} - -func (e *testEnvironment) Provisioner(name string) (packer.Provisioner, error) { - e.provCalled = true - e.provName = name - return nil, nil -} - -func (e *testEnvironment) Ui() packer.Ui { - e.uiCalled = true - return testEnvUi -} - -func TestEnvironmentRPC(t *testing.T) { - // Create the interface to test - e := &testEnvironment{} - - // Start the server - client, server := testClientServer(t) - defer client.Close() - defer server.Close() - server.RegisterEnvironment(e) - eClient := client.Environment() - - // Test Builder - builder, _ := eClient.Builder("foo") - if !e.builderCalled { - t.Fatal("builder should be called") - } - if e.builderName != "foo" { - t.Fatalf("bad: %#v", e.builderName) - } - - builder.Prepare(nil) - if !testEnvBuilder.PrepareCalled { - t.Fatal("should be called") - } - - // Test Cache - cache := eClient.Cache() - cache.Lock("foo") - if !testEnvCache.lockCalled { - t.Fatal("should be called") - } - - // Test Provisioner - _, _ = eClient.Provisioner("foo") - if !e.provCalled { - t.Fatal("should be called") - } - if e.provName != "foo" { - t.Fatalf("bad: %s", e.provName) - } - - // Test Ui - ui := eClient.Ui() - if !e.uiCalled { - t.Fatal("should be called") - } - - // Test calls on the Ui - ui.Say("format") - if !testEnvUi.sayCalled { - t.Fatal("should be called") - } - if testEnvUi.sayMessage != "format" { - t.Fatalf("bad: %#v", testEnvUi.sayMessage) - } -} - -func TestEnvironment_ImplementsEnvironment(t *testing.T) { - var _ packer.Environment = new(Environment) -} diff --git a/packer/rpc/server.go b/packer/rpc/server.go index 1f3e7eef2..b6d17dacf 100644 --- a/packer/rpc/server.go +++ b/packer/rpc/server.go @@ -19,7 +19,6 @@ const ( DefaultCacheEndpoint = "Cache" DefaultCommandEndpoint = "Command" DefaultCommunicatorEndpoint = "Communicator" - DefaultEnvironmentEndpoint = "Environment" DefaultHookEndpoint = "Hook" DefaultPostProcessorEndpoint = "PostProcessor" DefaultProvisionerEndpoint = "Provisioner" @@ -95,13 +94,6 @@ func (s *Server) RegisterCommunicator(c packer.Communicator) { }) } -func (s *Server) RegisterEnvironment(b packer.Environment) { - 
s.server.RegisterName(DefaultEnvironmentEndpoint, &EnvironmentServer{ - env: b, - mux: s.mux, - }) -} - func (s *Server) RegisterHook(h packer.Hook) { s.server.RegisterName(DefaultHookEndpoint, &HookServer{ hook: h, diff --git a/signal.go b/signal.go index b198558d7..e63dd2fe5 100644 --- a/signal.go +++ b/signal.go @@ -10,7 +10,7 @@ import ( // Prepares the signal handlers so that we handle interrupts properly. // The signal handler exists in a goroutine. -func setupSignalHandlers(env packer.Environment) { +func setupSignalHandlers(ui packer.Ui) { ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) @@ -20,13 +20,13 @@ func setupSignalHandlers(env packer.Environment) { <-ch log.Println("First interrupt. Ignoring to allow plugins to clean up.") - env.Ui().Error("Interrupt signal received. Cleaning up...") + ui.Error("Interrupt signal received. Cleaning up...") // Second interrupt. Go down hard. <-ch log.Println("Second interrupt. Exiting now.") - env.Ui().Error("Interrupt signal received twice. Forcefully exiting now.") + ui.Error("Interrupt signal received twice. Forcefully exiting now.") // Force kill all the plugins, but mark that we're killing them // first so that we don't get panics everywhere. From 547d9e759e5695496c0d2e17cfa6fe31918562e3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 May 2015 17:58:59 -0700 Subject: [PATCH 097/956] packer: test Build --- packer/core_test.go | 39 +++++++++++++++++++++++++++ packer/test-fixtures/build-basic.json | 5 ++++ packer/testing.go | 16 +++++++++++ 3 files changed, 60 insertions(+) create mode 100644 packer/test-fixtures/build-basic.json diff --git a/packer/core_test.go b/packer/core_test.go index d66a7786e..5935b1407 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -48,6 +48,36 @@ func TestCoreBuildNames(t *testing.T) { } } +func TestCoreBuild_basic(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-basic.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string @@ -110,3 +140,12 @@ func testComponentFinder() *ComponentFinder { Provisioner: provFactory, } } + +func testCoreTemplate(t *testing.T, c *CoreConfig, p string) { + tpl, err := template.ParseFile(p) + if err != nil { + t.Fatalf("err: %s\n\n%s", p, err) + } + + c.Template = tpl +} diff --git a/packer/test-fixtures/build-basic.json b/packer/test-fixtures/build-basic.json new file mode 100644 index 000000000..d14f6cad3 --- /dev/null +++ b/packer/test-fixtures/build-basic.json @@ -0,0 +1,5 @@ +{ + "builders": [{ + "type": "test" + }] +} diff --git a/packer/testing.go b/packer/testing.go index 099119180..389b02f90 100644 --- a/packer/testing.go +++ b/packer/testing.go @@ -42,3 +42,19 @@ func TestCore(t *testing.T, c *CoreConfig) *Core { return core } + +// TestBuilder sets the builder with the name n to the component finder +// and returns the mock. 
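+//
+// A minimal usage sketch, mirroring TestCoreBuild_basic in core_test.go:
+//
+//	config := TestCoreConfig(t)
+//	b := TestBuilder(t, config, "test")
+//	b.ArtifactId = "hello"
+//	core := TestCore(t, config)
+//
+// Any name other than n makes the registered finder yield (nil, nil),
+// which Core.Build reports as "builder type not found".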
+func TestBuilder(t *testing.T, c *CoreConfig, n string) *MockBuilder { + var b MockBuilder + + c.Components.Builder = func(actual string) (Builder, error) { + if actual != n { + return nil, nil + } + + return &b, nil + } + + return &b +} From c12072ecad14e0aee4c7524ca941f7afdb6d8e09 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 May 2015 18:15:07 -0700 Subject: [PATCH 098/956] packer: tests around interpolated names --- packer/core.go | 2 +- packer/core_test.go | 42 +++++++++++++++++++ .../build-basic-interpolated.json | 6 +++ 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 packer/test-fixtures/build-basic-interpolated.json diff --git a/packer/core.go b/packer/core.go index 4c4292ca7..cabf10359 100644 --- a/packer/core.go +++ b/packer/core.go @@ -103,7 +103,7 @@ func (c *Core) BuildNames() []string { // Build returns the Build object for the given name. func (c *Core) Build(n string) (Build, error) { // Setup the builder - configBuilder, ok := c.template.Builders[n] + configBuilder, ok := c.builds[n] if !ok { return nil, fmt.Errorf("no such build found: %s", n) } diff --git a/packer/core_test.go b/packer/core_test.go index 5935b1407..31ee34218 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -78,6 +78,48 @@ func TestCoreBuild_basic(t *testing.T) { } } +func TestCoreBuild_basicInterpolated(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-basic-interpolated.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("NAME") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } +} + +func TestCoreBuild_nonExist(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-basic.json")) + TestBuilder(t, config, "test") + core := TestCore(t, config) + + _, err := core.Build("nope") + if err == nil { + t.Fatal("should error") + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/packer/test-fixtures/build-basic-interpolated.json b/packer/test-fixtures/build-basic-interpolated.json new file mode 100644 index 000000000..c70677c52 --- /dev/null +++ b/packer/test-fixtures/build-basic-interpolated.json @@ -0,0 +1,6 @@ +{ + "builders": [{ + "name": "{{upper `name`}}", + "type": "test" + }] +} From b5f4ffa56c3d369d22cf321ef0371fc2d03c67cb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:07:16 -0700 Subject: [PATCH 099/956] template: OnlyExcept skipping --- template/template.go | 25 +++++++++++++++++++++++ template/template_test.go | 43 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/template/template.go b/template/template.go index 17d808029..52f50089f 100644 --- a/template/template.go +++ b/template/template.go @@ -125,6 +125,31 @@ func (t *Template) Validate() error { return err } +// Skip says whether or not to skip the build with the given name. 
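+//
+// A non-empty Only list skips every name not in it; otherwise a
+// non-empty Except list skips every name in it; an empty OnlyExcept
+// skips nothing. For example, mirroring the TestOnlyExceptSkip cases
+// in template_test.go:
+//
+//	oe := &OnlyExcept{Only: []string{"foo"}}
+//	oe.Skip("foo") // false: "foo" is listed in Only
+//	oe.Skip("bar") // true: Only is set and does not list "bar"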
+func (o *OnlyExcept) Skip(n string) bool { + if len(o.Only) > 0 { + for _, v := range o.Only { + if v == n { + return false + } + } + + return true + } + + if len(o.Except) > 0 { + for _, v := range o.Except { + if v == n { + return true + } + } + + return false + } + + return false +} + // Validate validates that the OnlyExcept settings are correct for a thing. func (o *OnlyExcept) Validate(t *Template) error { if len(o.Only) > 0 && len(o.Except) > 0 { diff --git a/template/template_test.go b/template/template_test.go index d14682728..6fa39ab88 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -92,3 +92,46 @@ func TestTemplateValidate(t *testing.T) { } } } + +func TestOnlyExceptSkip(t *testing.T) { + cases := []struct { + Only, Except []string + Input string + Result bool + }{ + { + []string{"foo"}, + nil, + "foo", + false, + }, + + { + nil, + []string{"foo"}, + "foo", + true, + }, + + { + nil, + nil, + "foo", + false, + }, + } + + for _, tc := range cases { + oe := &OnlyExcept{ + Only: tc.Only, + Except: tc.Except, + } + + actual := oe.Skip(tc.Input) + if actual != tc.Result { + t.Fatalf( + "bad: %#v\n\n%#v\n\n%#v\n\n%#v", + actual, tc.Only, tc.Except, tc.Input) + } + } +} From b25ae21e13996f9f2bf3a5b5f1558d30c156c64a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:14:29 -0700 Subject: [PATCH 100/956] packer: run provisioners --- packer/builder_mock.go | 6 ++++ packer/core.go | 44 +++++++++++++++++++++++++++- packer/core_test.go | 34 +++++++++++++++++++++ packer/test-fixtures/build-prov.json | 9 ++++++ packer/testing.go | 16 ++++++++++ 5 files changed, 108 insertions(+), 1 deletion(-) create mode 100644 packer/test-fixtures/build-prov.json diff --git a/packer/builder_mock.go b/packer/builder_mock.go index bfa0a0e47..9cb016963 100644 --- a/packer/builder_mock.go +++ b/packer/builder_mock.go @@ -42,6 +42,12 @@ func (tb *MockBuilder) Run(ui Ui, h Hook, c Cache) (Artifact, error) { return nil, nil } + if h != nil { + if err := h.Run(HookProvision, ui, nil, nil); err != nil { + return nil, err + } + } + return &MockArtifact{ IdValue: tb.ArtifactId, }, nil diff --git a/packer/core.go b/packer/core.go index cabf10359..4a0bf6a4d 100644 --- a/packer/core.go +++ b/packer/core.go @@ -118,13 +118,55 @@ func (c *Core) Build(n string) (Build, error) { "builder type not found: %s", configBuilder.Type) } - // TODO: template process name + // rawName is the uninterpolated name that we use for various lookups + rawName := configBuilder.Name + + // Setup the provisioners for this build + provisioners := make([]coreBuildProvisioner, 0, len(c.template.Provisioners)) + for _, rawP := range c.template.Provisioners { + // If we're skipping this, then ignore it + if rawP.Skip(rawName) { + continue + } + + // Get the provisioner + provisioner, err := c.components.Provisioner(rawP.Type) + if err != nil { + return nil, fmt.Errorf( + "error initializing provisioner '%s': %s", + rawP.Type, err) + } + if provisioner == nil { + return nil, fmt.Errorf( + "provisioner type not found: %s", rawP.Type) + } + + // Get the configuration + config := make([]interface{}, 1, 2) + config[0] = rawP.Config + + // TODO override + + // If we're pausing, we wrap the provisioner in a special pauser. 
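+		// (PausedProvisioner acts as a decorator: assuming the existing
+		// packer.PausedProvisioner behavior, it sleeps for PauseBefore
+		// before delegating Provision to the wrapped provisioner, so the
+		// rest of the build treats it like any other Provisioner.)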
+ if rawP.PauseBefore > 0 { + provisioner = &PausedProvisioner{ + PauseBefore: rawP.PauseBefore, + Provisioner: provisioner, + } + } + + provisioners = append(provisioners, coreBuildProvisioner{ + provisioner: provisioner, + config: config, + }) + } return &coreBuild{ name: n, builder: builder, builderConfig: configBuilder.Config, builderType: configBuilder.Type, + provisioners: provisioners, variables: c.variables, }, nil } diff --git a/packer/core_test.go b/packer/core_test.go index 31ee34218..c8cdfbfb8 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -120,6 +120,40 @@ func TestCoreBuild_nonExist(t *testing.T) { } } +func TestCoreBuild_prov(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-prov.json")) + b := TestBuilder(t, config, "test") + p := TestProvisioner(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } + if !p.ProvCalled { + t.Fatal("provisioner not called") + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/packer/test-fixtures/build-prov.json b/packer/test-fixtures/build-prov.json new file mode 100644 index 000000000..332c28b1d --- /dev/null +++ b/packer/test-fixtures/build-prov.json @@ -0,0 +1,9 @@ +{ + "builders": [{ + "type": "test" + }], + + "provisioners": [{ + "type": "test" + }] +} diff --git a/packer/testing.go b/packer/testing.go index 389b02f90..30b95c6e4 100644 --- a/packer/testing.go +++ b/packer/testing.go @@ -58,3 +58,19 @@ func TestBuilder(t *testing.T, c *CoreConfig, n string) *MockBuilder { return &b } + +// TestProvisioner sets the prov. with the name n to the component finder +// and returns the mock. 
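+//
+// It follows the same closure pattern as TestBuilder: the component
+// finder returns the shared MockProvisioner only when the requested
+// name matches n, and (nil, nil) otherwise.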
+func TestProvisioner(t *testing.T, c *CoreConfig, n string) *MockProvisioner { + var b MockProvisioner + + c.Components.Provisioner = func(actual string) (Provisioner, error) { + if actual != n { + return nil, nil + } + + return &b, nil + } + + return &b +} From 85e615bbe22e22613807c764d1c0e0e637f3b28f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:16:39 -0700 Subject: [PATCH 101/956] packer: a lot more provisioner tests --- packer/core_test.go | 68 +++++++++++++++++++ .../build-prov-skip-include.json | 10 +++ packer/test-fixtures/build-prov-skip.json | 10 +++ 3 files changed, 88 insertions(+) create mode 100644 packer/test-fixtures/build-prov-skip-include.json create mode 100644 packer/test-fixtures/build-prov-skip.json diff --git a/packer/core_test.go b/packer/core_test.go index c8cdfbfb8..5ef96dc96 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -154,6 +154,74 @@ func TestCoreBuild_prov(t *testing.T) { } } +func TestCoreBuild_provSkip(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-prov-skip.json")) + b := TestBuilder(t, config, "test") + p := TestProvisioner(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } + if p.ProvCalled { + t.Fatal("provisioner should not be called") + } +} + +func TestCoreBuild_provSkipInclude(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-prov-skip-include.json")) + b := TestBuilder(t, config, "test") + p := TestProvisioner(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } + if !p.ProvCalled { + t.Fatal("provisioner should be called") + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/packer/test-fixtures/build-prov-skip-include.json b/packer/test-fixtures/build-prov-skip-include.json new file mode 100644 index 000000000..2ba5e77de --- /dev/null +++ b/packer/test-fixtures/build-prov-skip-include.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "test" + }], + + "provisioners": [{ + "type": "test", + "only": ["test"] + }] +} diff --git a/packer/test-fixtures/build-prov-skip.json b/packer/test-fixtures/build-prov-skip.json new file mode 100644 index 000000000..bd9fa5072 --- /dev/null +++ b/packer/test-fixtures/build-prov-skip.json @@ -0,0 +1,10 @@ +{ + "builders": [{ + "type": "test" + }], + + "provisioners": [{ + "type": "test", + "only": ["foo"] + }] +} From 26c7ac2d9046459b2ea89ff9874536228f9cdead Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:28:59 -0700 Subject: [PATCH 102/956] packer: post-processors --- packer/build_test.go | 30 ++++++++--------- packer/core.go | 53 ++++++++++++++++++++++++++---- 
packer/core_test.go | 38 ++++++++++++++++++++- packer/post_processor_mock.go | 33 +++++++++++++++++++ packer/post_processor_test.go | 24 -------------- packer/template_test.go | 4 +-- packer/test-fixtures/build-pp.json | 7 ++++ packer/testing.go | 35 +++++++++++++++----- 8 files changed, 167 insertions(+), 57 deletions(-) create mode 100644 packer/post_processor_mock.go delete mode 100644 packer/post_processor_test.go create mode 100644 packer/test-fixtures/build-pp.json diff --git a/packer/build_test.go b/packer/build_test.go index 5a073d39c..4f93e03a5 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -19,7 +19,7 @@ func testBuild() *coreBuild { }, postProcessors: [][]coreBuildPostProcessor{ []coreBuildPostProcessor{ - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp"}, "testPP", make(map[string]interface{}), true}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp"}, "testPP", make(map[string]interface{}), true}, }, }, variables: make(map[string]string), @@ -66,12 +66,12 @@ func TestBuild_Prepare(t *testing.T) { } corePP := build.postProcessors[0][0] - pp := corePP.processor.(*TestPostProcessor) - if !pp.configCalled { + pp := corePP.processor.(*MockPostProcessor) + if !pp.ConfigureCalled { t.Fatal("should be called") } - if !reflect.DeepEqual(pp.configVal, []interface{}{make(map[string]interface{}), packerConfig}) { - t.Fatalf("bad: %#v", pp.configVal) + if !reflect.DeepEqual(pp.ConfigureConfigs, []interface{}{make(map[string]interface{}), packerConfig}) { + t.Fatalf("bad: %#v", pp.ConfigureConfigs) } } @@ -208,8 +208,8 @@ func TestBuild_Run(t *testing.T) { } // Verify post-processor was run - pp := build.postProcessors[0][0].processor.(*TestPostProcessor) - if !pp.ppCalled { + pp := build.postProcessors[0][0].processor.(*MockPostProcessor) + if !pp.PostProcessCalled { t.Fatal("should be called") } } @@ -244,7 +244,7 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() build.postProcessors = [][]coreBuildPostProcessor{ []coreBuildPostProcessor{ - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp"}, "pp", make(map[string]interface{}), false}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp"}, "pp", make(map[string]interface{}), false}, }, } @@ -269,10 +269,10 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() build.postProcessors = [][]coreBuildPostProcessor{ []coreBuildPostProcessor{ - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp1"}, "pp", make(map[string]interface{}), false}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp1"}, "pp", make(map[string]interface{}), false}, }, []coreBuildPostProcessor{ - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp2"}, "pp", make(map[string]interface{}), true}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp2"}, "pp", make(map[string]interface{}), true}, }, } @@ -297,12 +297,12 @@ func TestBuild_Run_Artifacts(t *testing.T) { build = testBuild() build.postProcessors = [][]coreBuildPostProcessor{ []coreBuildPostProcessor{ - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp1a"}, "pp", make(map[string]interface{}), false}, - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp1b"}, "pp", make(map[string]interface{}), true}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp1a"}, "pp", make(map[string]interface{}), false}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp1b"}, "pp", make(map[string]interface{}), true}, }, []coreBuildPostProcessor{ - 
coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp2a"}, "pp", make(map[string]interface{}), false}, - coreBuildPostProcessor{&TestPostProcessor{artifactId: "pp2b"}, "pp", make(map[string]interface{}), false}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp2a"}, "pp", make(map[string]interface{}), false}, + coreBuildPostProcessor{&MockPostProcessor{ArtifactId: "pp2b"}, "pp", make(map[string]interface{}), false}, }, } @@ -328,7 +328,7 @@ func TestBuild_Run_Artifacts(t *testing.T) { build.postProcessors = [][]coreBuildPostProcessor{ []coreBuildPostProcessor{ coreBuildPostProcessor{ - &TestPostProcessor{artifactId: "pp", keep: true}, "pp", make(map[string]interface{}), false, + &MockPostProcessor{ArtifactId: "pp", Keep: true}, "pp", make(map[string]interface{}), false, }, }, } diff --git a/packer/core.go b/packer/core.go index 4a0bf6a4d..a372c7ee8 100644 --- a/packer/core.go +++ b/packer/core.go @@ -161,13 +161,54 @@ func (c *Core) Build(n string) (Build, error) { }) } + // Setup the post-processors + postProcessors := make([][]coreBuildPostProcessor, 0, len(c.template.PostProcessors)) + for _, rawPs := range c.template.PostProcessors { + current := make([]coreBuildPostProcessor, 0, len(rawPs)) + for _, rawP := range rawPs { + // If we skip, ignore + if rawP.Skip(rawName) { + continue + } + + // Get the post-processor + postProcessor, err := c.components.PostProcessor(rawP.Type) + if err != nil { + return nil, fmt.Errorf( + "error initializing post-processor '%s': %s", + rawP.Type, err) + } + if postProcessor == nil { + return nil, fmt.Errorf( + "post-processor type not found: %s", rawP.Type) + } + + current = append(current, coreBuildPostProcessor{ + processor: postProcessor, + processorType: rawP.Type, + config: rawP.Config, + keepInputArtifact: rawP.KeepInputArtifact, + }) + } + + // If we have no post-processors in this chain, just continue. 
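+		// This can happen if every post-processor in the chain is
+		// skipped for this build via its only/except settings.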
+ if len(current) == 0 { + continue + } + + postProcessors = append(postProcessors, current) + } + + // TODO hooks one day + return &coreBuild{ - name: n, - builder: builder, - builderConfig: configBuilder.Config, - builderType: configBuilder.Type, - provisioners: provisioners, - variables: c.variables, + name: n, + builder: builder, + builderConfig: configBuilder.Config, + builderType: configBuilder.Type, + postProcessors: postProcessors, + provisioners: provisioners, + variables: c.variables, }, nil } diff --git a/packer/core_test.go b/packer/core_test.go index 5ef96dc96..712694766 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -222,6 +222,42 @@ func TestCoreBuild_provSkipInclude(t *testing.T) { } } +func TestCoreBuild_postProcess(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-pp.json")) + b := TestBuilder(t, config, "test") + p := TestPostProcessor(t, config, "test") + core := TestCore(t, config) + ui := TestUi(t) + + b.ArtifactId = "hello" + p.ArtifactId = "goodbye" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(ui, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != p.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } + if p.PostProcessArtifact.Id() != b.ArtifactId { + t.Fatalf("bad: %s", p.PostProcessArtifact.Id()) + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string @@ -276,7 +312,7 @@ func TestCoreValidate(t *testing.T) { func testComponentFinder() *ComponentFinder { builderFactory := func(n string) (Builder, error) { return new(MockBuilder), nil } - ppFactory := func(n string) (PostProcessor, error) { return new(TestPostProcessor), nil } + ppFactory := func(n string) (PostProcessor, error) { return new(MockPostProcessor), nil } provFactory := func(n string) (Provisioner, error) { return new(MockProvisioner), nil } return &ComponentFinder{ Builder: builderFactory, diff --git a/packer/post_processor_mock.go b/packer/post_processor_mock.go new file mode 100644 index 000000000..591e4b876 --- /dev/null +++ b/packer/post_processor_mock.go @@ -0,0 +1,33 @@ +package packer + +// MockPostProcessor is an implementation of PostProcessor that can be +// used for tests. 
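+//
+// Configure records its arguments in ConfigureConfigs and returns
+// ConfigureError; PostProcess records the artifact and Ui it was given
+// and returns a MockArtifact whose Id is ArtifactId, along with Keep
+// and Error, so tests can assert on all of them.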
+type MockPostProcessor struct { + ArtifactId string + Keep bool + Error error + + ConfigureCalled bool + ConfigureConfigs []interface{} + ConfigureError error + + PostProcessCalled bool + PostProcessArtifact Artifact + PostProcessUi Ui +} + +func (t *MockPostProcessor) Configure(configs ...interface{}) error { + t.ConfigureCalled = true + t.ConfigureConfigs = configs + return t.ConfigureError +} + +func (t *MockPostProcessor) PostProcess(ui Ui, a Artifact) (Artifact, bool, error) { + t.PostProcessCalled = true + t.PostProcessArtifact = a + t.PostProcessUi = ui + + return &MockArtifact{ + IdValue: t.ArtifactId, + }, t.Keep, t.Error +} diff --git a/packer/post_processor_test.go b/packer/post_processor_test.go deleted file mode 100644 index fa6dbdbf9..000000000 --- a/packer/post_processor_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package packer - -type TestPostProcessor struct { - artifactId string - keep bool - configCalled bool - configVal []interface{} - ppCalled bool - ppArtifact Artifact - ppUi Ui -} - -func (pp *TestPostProcessor) Configure(v ...interface{}) error { - pp.configCalled = true - pp.configVal = v - return nil -} - -func (pp *TestPostProcessor) PostProcess(ui Ui, a Artifact) (Artifact, bool, error) { - pp.ppCalled = true - pp.ppArtifact = a - pp.ppUi = ui - return &TestArtifact{id: pp.artifactId}, pp.keep, nil -} diff --git a/packer/template_test.go b/packer/template_test.go index b676672b1..2d0949376 100644 --- a/packer/template_test.go +++ b/packer/template_test.go @@ -11,7 +11,7 @@ import ( func testTemplateComponentFinder() *ComponentFinder { builder := new(MockBuilder) - pp := new(TestPostProcessor) + pp := new(MockPostProcessor) provisioner := &MockProvisioner{} builderMap := map[string]Builder{ @@ -1018,7 +1018,7 @@ func TestTemplate_Build(t *testing.T) { "test-prov": provisioner, } - pp := new(TestPostProcessor) + pp := new(MockPostProcessor) ppMap := map[string]PostProcessor{ "simple": pp, } diff --git a/packer/test-fixtures/build-pp.json b/packer/test-fixtures/build-pp.json new file mode 100644 index 000000000..e2b32cfac --- /dev/null +++ b/packer/test-fixtures/build-pp.json @@ -0,0 +1,7 @@ +{ + "builders": [{ + "type": "test" + }], + + "post-processors": ["test"] +} diff --git a/packer/testing.go b/packer/testing.go index 30b95c6e4..7e7ad0b53 100644 --- a/packer/testing.go +++ b/packer/testing.go @@ -8,14 +8,6 @@ import ( ) func TestCoreConfig(t *testing.T) *CoreConfig { - // Create a UI that is effectively /dev/null everywhere - var buf bytes.Buffer - ui := &BasicUi{ - Reader: &buf, - Writer: ioutil.Discard, - ErrorWriter: ioutil.Discard, - } - // Create some test components components := ComponentFinder{ Builder: func(n string) (Builder, error) { @@ -30,7 +22,7 @@ func TestCoreConfig(t *testing.T) *CoreConfig { return &CoreConfig{ Cache: &FileCache{CacheDir: os.TempDir()}, Components: components, - Ui: ui, + Ui: TestUi(t), } } @@ -43,6 +35,15 @@ func TestCore(t *testing.T, c *CoreConfig) *Core { return core } +func TestUi(t *testing.T) Ui { + var buf bytes.Buffer + return &BasicUi{ + Reader: &buf, + Writer: ioutil.Discard, + ErrorWriter: ioutil.Discard, + } +} + // TestBuilder sets the builder with the name n to the component finder // and returns the mock. func TestBuilder(t *testing.T, c *CoreConfig, n string) *MockBuilder { @@ -74,3 +75,19 @@ func TestProvisioner(t *testing.T, c *CoreConfig, n string) *MockProvisioner { return &b } + +// TestPostProcessor sets the prov. with the name n to the component finder +// and returns the mock. 
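+//
+// It is the post-processor analogue of TestBuilder and TestProvisioner,
+// and is what TestCoreBuild_postProcess above uses to assert that the
+// builder's artifact is handed to the post-processor.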
+func TestPostProcessor(t *testing.T, c *CoreConfig, n string) *MockPostProcessor { + var b MockPostProcessor + + c.Components.PostProcessor = func(actual string) (PostProcessor, error) { + if actual != n { + return nil, nil + } + + return &b, nil + } + + return &b +} From 2fb08be192fcd3cd73b8c9d55ec213ddd22478f1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:38:02 -0700 Subject: [PATCH 103/956] template: store Rawcontents --- template/parse.go | 9 +++++++++ template/parse_test.go | 3 +++ template/template.go | 3 +++ 3 files changed, 15 insertions(+) diff --git a/template/parse.go b/template/parse.go index a46adc594..5566e31cc 100644 --- a/template/parse.go +++ b/template/parse.go @@ -1,6 +1,7 @@ package template import ( + "bytes" "encoding/json" "fmt" "io" @@ -23,6 +24,8 @@ type rawTemplate struct { PostProcessors []interface{} `mapstructure:"post-processors"` Provisioners []map[string]interface{} Variables map[string]interface{} + + RawContents []byte } // Template returns the actual Template object built from this raw @@ -34,6 +37,7 @@ func (r *rawTemplate) Template() (*Template, error) { // Copy some literals result.Description = r.Description result.MinVersion = r.MinVersion + result.RawContents = r.RawContents // Gather the variables if len(r.Variables) > 0 { @@ -252,6 +256,10 @@ func (r *rawTemplate) parsePostProcessor( // Parse takes the given io.Reader and parses a Template object out of it. func Parse(r io.Reader) (*Template, error) { + // Create a buffer to copy what we read + var buf bytes.Buffer + r = io.TeeReader(r, &buf) + // First, decode the object into an interface{}. We do this instead of // the rawTemplate directly because we'd rather use mapstructure to // decode since it has richer errors. @@ -263,6 +271,7 @@ func Parse(r io.Reader) (*Template, error) { // Create our decoder var md mapstructure.Metadata var rawTpl rawTemplate + rawTpl.RawContents = buf.Bytes() decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ Metadata: &md, Result: &rawTpl, diff --git a/template/parse_test.go b/template/parse_test.go index 2cca68b88..d5d9fcd8e 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -276,6 +276,9 @@ func TestParse(t *testing.T) { t.Fatalf("err: %s", err) } + if tpl != nil { + tpl.RawContents = nil + } if !reflect.DeepEqual(tpl, tc.Result) { t.Fatalf("bad: %s\n\n%#v\n\n%#v", tc.File, tpl, tc.Result) } diff --git a/template/template.go b/template/template.go index 52f50089f..1ab3c668e 100644 --- a/template/template.go +++ b/template/template.go @@ -19,6 +19,9 @@ type Template struct { Provisioners []*Provisioner PostProcessors [][]*PostProcessor Push *Push + + // RawContents is just the raw data for this template + RawContents []byte } // Builder represents a builder configured in the template From 946f74588177fe605dde79c1601e5065dedc0f49 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:38:09 -0700 Subject: [PATCH 104/956] command: don't use packer.Template --- command/inspect.go | 11 +++++------ command/push.go | 10 +++++----- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/command/inspect.go b/command/inspect.go index 2574615fb..f564b2689 100644 --- a/command/inspect.go +++ b/command/inspect.go @@ -2,10 +2,10 @@ package command import ( "fmt" - "github.com/mitchellh/packer/packer" - "log" "sort" "strings" + + "github.com/mitchellh/packer/template" ) type InspectCommand struct { @@ -13,7 +13,7 @@ type InspectCommand struct { } func (c *InspectCommand) Run(args 
[]string) int { - flags := c.Meta.FlagSet("build", FlagSetNone) + flags := c.Meta.FlagSet("inspect", FlagSetNone) flags.Usage = func() { c.Ui.Say(c.Help()) } if err := flags.Parse(args); err != nil { return 1 @@ -25,9 +25,8 @@ func (c *InspectCommand) Run(args []string) int { return 1 } - // Read the file into a byte array so that we can parse the template - log.Printf("Reading template: %#v", args[0]) - tpl, err := packer.ParseTemplateFile(args[0], nil) + // Parse the template + tpl, err := template.ParseFile(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 diff --git a/command/push.go b/command/push.go index ef0f42924..1c53d8508 100644 --- a/command/push.go +++ b/command/push.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/atlas-go/archive" "github.com/hashicorp/atlas-go/v1" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" ) // archiveTemplateEntry is the name the template always takes within the slug. @@ -58,15 +58,15 @@ func (c *PushCommand) Run(args []string) int { "longer used. It will be removed in the next version.")) } - // Read the template - tpl, err := packer.ParseTemplateFile(args[0], nil) + // Parse the template + tpl, err := template.ParseFile(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } // Validate some things - if tpl.Push.Name == "" { + if tpl.Push == nil || tpl.Push.Name == "" { c.Ui.Error(fmt.Sprintf( "The 'push' section must be specified in the template with\n" + "at least the 'name' option set.")) @@ -131,7 +131,7 @@ func (c *PushCommand) Run(args []string) int { } // Find the Atlas post-processors, if possible - var atlasPPs []packer.RawPostProcessorConfig + var atlasPPs []*template.PostProcessor for _, list := range tpl.PostProcessors { for _, pp := range list { if pp.Type == "atlas" { From 99a93009ed0ff4427b5c1a67aa729482a47e0c9f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:38:24 -0700 Subject: [PATCH 105/956] packer: remove Template --- packer/template.go | 734 --------------- packer/template_test.go | 1914 --------------------------------------- 2 files changed, 2648 deletions(-) delete mode 100644 packer/template.go delete mode 100644 packer/template_test.go diff --git a/packer/template.go b/packer/template.go deleted file mode 100644 index 717ae7682..000000000 --- a/packer/template.go +++ /dev/null @@ -1,734 +0,0 @@ -package packer - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "sort" - "text/template" - "time" - - "github.com/hashicorp/go-version" - "github.com/mitchellh/mapstructure" - jsonutil "github.com/mitchellh/packer/common/json" -) - -// The rawTemplate struct represents the structure of a template read -// directly from a file. The builders and other components map just to -// "interface{}" pointers since we actually don't know what their contents -// are until we read the "type" field. -type rawTemplate struct { - MinimumPackerVersion string `mapstructure:"min_packer_version"` - - Description string - Builders []map[string]interface{} - Hooks map[string][]string - Push PushConfig - PostProcessors []interface{} `mapstructure:"post-processors"` - Provisioners []map[string]interface{} - Variables map[string]interface{} -} - -// The Template struct represents a parsed template, parsed into the most -// completed form it can be without additional processing by the caller. 
-type Template struct { - RawContents []byte - Description string - Variables map[string]RawVariable - Builders map[string]RawBuilderConfig - Hooks map[string][]string - Push *PushConfig - PostProcessors [][]RawPostProcessorConfig - Provisioners []RawProvisionerConfig -} - -// PushConfig is the configuration structure for the push settings. -type PushConfig struct { - Name string - Address string - BaseDir string `mapstructure:"base_dir"` - Include []string - Exclude []string - Token string - VCS bool -} - -// The RawBuilderConfig struct represents a raw, unprocessed builder -// configuration. It contains the name of the builder as well as the -// raw configuration. If requested, this is used to compile into a full -// builder configuration at some point. -type RawBuilderConfig struct { - Name string - Type string - - RawConfig interface{} -} - -// RawPostProcessorConfig represents a raw, unprocessed post-processor -// configuration. It contains the type of the post processor as well as the -// raw configuration that is handed to the post-processor for it to process. -type RawPostProcessorConfig struct { - TemplateOnlyExcept `mapstructure:",squash"` - - Type string - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - RawConfig map[string]interface{} -} - -// RawProvisionerConfig represents a raw, unprocessed provisioner configuration. -// It contains the type of the provisioner as well as the raw configuration -// that is handed to the provisioner for it to process. -type RawProvisionerConfig struct { - TemplateOnlyExcept `mapstructure:",squash"` - - Type string - Override map[string]interface{} - RawPauseBefore string `mapstructure:"pause_before"` - - RawConfig interface{} - - pauseBefore time.Duration -} - -// RawVariable represents a variable configuration within a template. -type RawVariable struct { - Default string // The default value for this variable - Required bool // If the variable is required or not - Value string // The set value for this variable - HasValue bool // True if the value was set -} - -// ParseTemplate takes a byte slice and parses a Template from it, returning -// the template and possibly errors while loading the template. The error -// could potentially be a MultiError, representing multiple errors. Knowing -// and checking for this can be useful, if you wish to format it in a certain -// way. -// -// The second parameter, vars, are the values for a set of user variables. -func ParseTemplate(data []byte, vars map[string]string) (t *Template, err error) { - var rawTplInterface interface{} - err = jsonutil.Unmarshal(data, &rawTplInterface) - if err != nil { - return - } - - // Decode the raw template interface into the actual rawTemplate - // structure, checking for any extranneous keys along the way. - var md mapstructure.Metadata - var rawTpl rawTemplate - decoderConfig := &mapstructure.DecoderConfig{ - Metadata: &md, - Result: &rawTpl, - } - - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return - } - - err = decoder.Decode(rawTplInterface) - if err != nil { - return - } - - if rawTpl.MinimumPackerVersion != "" { - // TODO: NOPE! Replace this - Version := "1.0" - vCur, err := version.NewVersion(Version) - if err != nil { - panic(err) - } - vReq, err := version.NewVersion(rawTpl.MinimumPackerVersion) - if err != nil { - return nil, fmt.Errorf( - "'minimum_packer_version' error: %s", err) - } - - if vCur.LessThan(vReq) { - return nil, fmt.Errorf( - "Template requires Packer version %s. 
"+ - "Running version is %s.", - vReq, vCur) - } - } - - errors := make([]error, 0) - - if len(md.Unused) > 0 { - sort.Strings(md.Unused) - for _, unused := range md.Unused { - errors = append( - errors, fmt.Errorf("Unknown root level key in template: '%s'", unused)) - } - } - - t = &Template{} - t.RawContents = data - t.Description = rawTpl.Description - t.Variables = make(map[string]RawVariable) - t.Builders = make(map[string]RawBuilderConfig) - t.Hooks = rawTpl.Hooks - t.Push = &rawTpl.Push - t.PostProcessors = make([][]RawPostProcessorConfig, len(rawTpl.PostProcessors)) - t.Provisioners = make([]RawProvisionerConfig, len(rawTpl.Provisioners)) - - // Gather all the variables - for k, v := range rawTpl.Variables { - var variable RawVariable - variable.Required = v == nil - - // Create a new mapstructure decoder in order to decode the default - // value since this is the only value in the regular template that - // can be weakly typed. - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - Result: &variable.Default, - WeaklyTypedInput: true, - }) - if err != nil { - // This should never happen. - panic(err) - } - - err = decoder.Decode(v) - if err != nil { - errors = append(errors, - fmt.Errorf("Error decoding default value for user var '%s': %s", k, err)) - continue - } - - // Set the value of this variable if we have it - if val, ok := vars[k]; ok { - variable.HasValue = true - variable.Value = val - delete(vars, k) - } - - t.Variables[k] = variable - } - - // Gather all the builders - for i, v := range rawTpl.Builders { - var raw RawBuilderConfig - if err := mapstructure.Decode(v, &raw); err != nil { - if merr, ok := err.(*mapstructure.Error); ok { - for _, err := range merr.Errors { - errors = append(errors, fmt.Errorf("builder %d: %s", i+1, err)) - } - } else { - errors = append(errors, fmt.Errorf("builder %d: %s", i+1, err)) - } - - continue - } - - if raw.Type == "" { - errors = append(errors, fmt.Errorf("builder %d: missing 'type'", i+1)) - continue - } - - // Attempt to get the name of the builder. If the "name" key - // missing, use the "type" field, which is guaranteed to exist - // at this point. - if raw.Name == "" { - raw.Name = raw.Type - } - - // Check if we already have a builder with this name and error if so - if _, ok := t.Builders[raw.Name]; ok { - errors = append(errors, fmt.Errorf("builder with name '%s' already exists", raw.Name)) - continue - } - - // Now that we have the name, remove it from the config - as the builder - // itself doesn't know about, and it will cause a validation error. - delete(v, "name") - - raw.RawConfig = v - - t.Builders[raw.Name] = raw - } - - // Gather all the post-processors. This is a complicated process since there - // are actually three different formats that the user can use to define - // a post-processor. - for i, rawV := range rawTpl.PostProcessors { - rawPP, err := parsePostProcessor(i, rawV) - if err != nil { - errors = append(errors, err...) 
- continue - } - - configs := make([]RawPostProcessorConfig, 0, len(rawPP)) - for j, pp := range rawPP { - var config RawPostProcessorConfig - if err := mapstructure.Decode(pp, &config); err != nil { - if merr, ok := err.(*mapstructure.Error); ok { - for _, err := range merr.Errors { - errors = append(errors, - fmt.Errorf("Post-processor #%d.%d: %s", i+1, j+1, err)) - } - } else { - errors = append(errors, - fmt.Errorf("Post-processor %d.%d: %s", i+1, j+1, err)) - } - - continue - } - - if config.Type == "" { - errors = append(errors, - fmt.Errorf("Post-processor %d.%d: missing 'type'", i+1, j+1)) - continue - } - - // Remove the input keep_input_artifact option - config.TemplateOnlyExcept.Prune(pp) - delete(pp, "keep_input_artifact") - - // Verify that the only settings are good - if errs := config.TemplateOnlyExcept.Validate(t.Builders); len(errs) > 0 { - for _, err := range errs { - errors = append(errors, - fmt.Errorf("Post-processor %d.%d: %s", i+1, j+1, err)) - } - - continue - } - - config.RawConfig = pp - - // Add it to the list of configs - configs = append(configs, config) - } - - t.PostProcessors[i] = configs - } - - // Gather all the provisioners - for i, v := range rawTpl.Provisioners { - raw := &t.Provisioners[i] - if err := mapstructure.Decode(v, raw); err != nil { - if merr, ok := err.(*mapstructure.Error); ok { - for _, err := range merr.Errors { - errors = append(errors, fmt.Errorf("provisioner %d: %s", i+1, err)) - } - } else { - errors = append(errors, fmt.Errorf("provisioner %d: %s", i+1, err)) - } - - continue - } - - if raw.Type == "" { - errors = append(errors, fmt.Errorf("provisioner %d: missing 'type'", i+1)) - continue - } - - // Delete the keys that we used - raw.TemplateOnlyExcept.Prune(v) - delete(v, "override") - - // Verify that the override keys exist... - for name, _ := range raw.Override { - if _, ok := t.Builders[name]; !ok { - errors = append( - errors, fmt.Errorf("provisioner %d: build '%s' not found for override", i+1, name)) - } - } - - // Verify that the only settings are good - if errs := raw.TemplateOnlyExcept.Validate(t.Builders); len(errs) > 0 { - for _, err := range errs { - errors = append(errors, - fmt.Errorf("provisioner %d: %s", i+1, err)) - } - } - - // Setup the pause settings - if raw.RawPauseBefore != "" { - duration, err := time.ParseDuration(raw.RawPauseBefore) - if err != nil { - errors = append( - errors, fmt.Errorf( - "provisioner %d: pause_before invalid: %s", - i+1, err)) - } - - raw.pauseBefore = duration - } - - // Remove the pause_before setting if it is there so that we don't - // get template validation errors later. - delete(v, "pause_before") - - raw.RawConfig = v - } - - if len(t.Builders) == 0 { - errors = append(errors, fmt.Errorf("No builders are defined in the template.")) - } - - // Verify that all the variable sets were for real variables. - for k, _ := range vars { - errors = append(errors, fmt.Errorf("Unknown user variables: %s", k)) - } - - // If there were errors, we put it into a MultiError and return - if len(errors) > 0 { - err = &MultiError{errors} - t = nil - return - } - - return -} - -// ParseTemplateFile takes the given template file and parses it into -// a single template. -func ParseTemplateFile(path string, vars map[string]string) (*Template, error) { - var data []byte - - if path == "-" { - // Read from stdin... 
- buf := new(bytes.Buffer) - _, err := io.Copy(buf, os.Stdin) - if err != nil { - return nil, err - } - - data = buf.Bytes() - } else { - var err error - data, err = ioutil.ReadFile(path) - if err != nil { - return nil, err - } - } - - return ParseTemplate(data, vars) -} - -func parsePostProcessor(i int, rawV interface{}) (result []map[string]interface{}, errors []error) { - switch v := rawV.(type) { - case string: - result = []map[string]interface{}{ - {"type": v}, - } - case map[string]interface{}: - result = []map[string]interface{}{v} - case []interface{}: - result = make([]map[string]interface{}, len(v)) - errors = make([]error, 0) - for j, innerRawV := range v { - switch innerV := innerRawV.(type) { - case string: - result[j] = map[string]interface{}{"type": innerV} - case map[string]interface{}: - result[j] = innerV - case []interface{}: - errors = append( - errors, - fmt.Errorf("Post-processor %d.%d: sequences not allowed to be nested in sequences", i+1, j+1)) - default: - errors = append(errors, fmt.Errorf("Post-processor %d.%d is in a bad format.", i+1, j+1)) - } - } - - if len(errors) == 0 { - errors = nil - } - default: - result = nil - errors = []error{fmt.Errorf("Post-processor %d is in a bad format.", i+1)} - } - - return -} - -// BuildNames returns a slice of the available names of builds that -// this template represents. -func (t *Template) BuildNames() []string { - names := make([]string, 0, len(t.Builders)) - for name, _ := range t.Builders { - names = append(names, name) - } - - return names -} - -// Build returns a Build for the given name. -// -// If the build does not exist as part of this template, an error is -// returned. -func (t *Template) Build(name string, components *ComponentFinder) (b Build, err error) { - // Setup the Builder - builderConfig, ok := t.Builders[name] - if !ok { - err = fmt.Errorf("No such build found in template: %s", name) - return - } - - // We panic if there is no builder function because this is really - // an internal bug that always needs to be fixed, not an error. - if components.Builder == nil { - panic("no builder function") - } - - // Panic if there are provisioners on the template but no provisioner - // component finder. This is always an internal error, so we panic. 
- if len(t.Provisioners) > 0 && components.Provisioner == nil { - panic("no provisioner function") - } - - builder, err := components.Builder(builderConfig.Type) - if err != nil { - return - } - - if builder == nil { - err = fmt.Errorf("Builder type not found: %s", builderConfig.Type) - return - } - - // Process the name - tpl, variables, err := t.NewConfigTemplate() - if err != nil { - return nil, err - } - - rawName := name - name, err = tpl.Process(name, nil) - if err != nil { - return nil, err - } - - // Gather the Hooks - hooks := make(map[string][]Hook) - for tplEvent, tplHooks := range t.Hooks { - curHooks := make([]Hook, 0, len(tplHooks)) - - for _, hookName := range tplHooks { - var hook Hook - hook, err = components.Hook(hookName) - if err != nil { - return - } - - if hook == nil { - err = fmt.Errorf("Hook not found: %s", hookName) - return - } - - curHooks = append(curHooks, hook) - } - - hooks[tplEvent] = curHooks - } - - // Prepare the post-processors - postProcessors := make([][]coreBuildPostProcessor, 0, len(t.PostProcessors)) - for _, rawPPs := range t.PostProcessors { - current := make([]coreBuildPostProcessor, 0, len(rawPPs)) - for _, rawPP := range rawPPs { - if rawPP.TemplateOnlyExcept.Skip(rawName) { - continue - } - - pp, err := components.PostProcessor(rawPP.Type) - if err != nil { - return nil, err - } - - if pp == nil { - return nil, fmt.Errorf("PostProcessor type not found: %s", rawPP.Type) - } - - current = append(current, coreBuildPostProcessor{ - processor: pp, - processorType: rawPP.Type, - config: rawPP.RawConfig, - keepInputArtifact: rawPP.KeepInputArtifact, - }) - } - - // If we have no post-processors in this chain, just continue. - // This can happen if the post-processors skip certain builds. - if len(current) == 0 { - continue - } - - postProcessors = append(postProcessors, current) - } - - // Prepare the provisioners - provisioners := make([]coreBuildProvisioner, 0, len(t.Provisioners)) - for _, rawProvisioner := range t.Provisioners { - if rawProvisioner.TemplateOnlyExcept.Skip(rawName) { - continue - } - - var provisioner Provisioner - provisioner, err = components.Provisioner(rawProvisioner.Type) - if err != nil { - return - } - - if provisioner == nil { - err = fmt.Errorf("Provisioner type not found: %s", rawProvisioner.Type) - return - } - - configs := make([]interface{}, 1, 2) - configs[0] = rawProvisioner.RawConfig - - if rawProvisioner.Override != nil { - if override, ok := rawProvisioner.Override[name]; ok { - configs = append(configs, override) - } - } - - if rawProvisioner.pauseBefore > 0 { - provisioner = &PausedProvisioner{ - PauseBefore: rawProvisioner.pauseBefore, - Provisioner: provisioner, - } - } - - coreProv := coreBuildProvisioner{provisioner, configs} - provisioners = append(provisioners, coreProv) - } - - b = &coreBuild{ - name: name, - builder: builder, - builderConfig: builderConfig.RawConfig, - builderType: builderConfig.Type, - hooks: hooks, - postProcessors: postProcessors, - provisioners: provisioners, - variables: variables, - } - - return -} - -//Build a ConfigTemplate object populated by the values within a -//parsed template -func (t *Template) NewConfigTemplate() (c *ConfigTemplate, variables map[string]string, err error) { - - // Prepare the variable template processor, which is a bit unique - // because we don't allow user variable usage and we add a function - // to read from the environment. 
- varTpl, err := NewConfigTemplate() - if err != nil { - return nil, nil, err - } - varTpl.Funcs(template.FuncMap{ - "env": templateEnv, - "user": templateDisableUser, - }) - - // Prepare the variables - var varErrors []error - variables = make(map[string]string) - for k, v := range t.Variables { - if v.Required && !v.HasValue { - varErrors = append(varErrors, - fmt.Errorf("Required user variable '%s' not set", k)) - } - - var val string - if v.HasValue { - val = v.Value - } else { - val, err = varTpl.Process(v.Default, nil) - if err != nil { - varErrors = append(varErrors, - fmt.Errorf("Error processing user variable '%s': %s'", k, err)) - } - } - - variables[k] = val - } - - if len(varErrors) > 0 { - return nil, variables, &MultiError{varErrors} - } - - // Process the name - tpl, err := NewConfigTemplate() - if err != nil { - return nil, variables, err - } - tpl.UserVars = variables - - return tpl, variables, nil -} - -// TemplateOnlyExcept contains the logic required for "only" and "except" -// meta-parameters. -type TemplateOnlyExcept struct { - Only []string - Except []string -} - -// Prune will prune out the used values from the raw map. -func (t *TemplateOnlyExcept) Prune(raw map[string]interface{}) { - delete(raw, "except") - delete(raw, "only") -} - -// Skip tests if we should skip putting this item onto a build. -func (t *TemplateOnlyExcept) Skip(name string) bool { - if len(t.Only) > 0 { - onlyFound := false - for _, n := range t.Only { - if n == name { - onlyFound = true - break - } - } - - if !onlyFound { - // Skip this provisioner - return true - } - } - - // If the name is in the except list, then skip that - for _, n := range t.Except { - if n == name { - return true - } - } - - return false -} - -// Validates the only/except parameters. 
-func (t *TemplateOnlyExcept) Validate(b map[string]RawBuilderConfig) (e []error) { - if len(t.Only) > 0 && len(t.Except) > 0 { - e = append(e, - fmt.Errorf("Only one of 'only' or 'except' may be specified.")) - } - - if len(t.Only) > 0 { - for _, n := range t.Only { - if _, ok := b[n]; !ok { - e = append(e, - fmt.Errorf("'only' specified builder '%s' not found", n)) - } - } - } - - for _, n := range t.Except { - if _, ok := b[n]; !ok { - e = append(e, - fmt.Errorf("'except' specified builder '%s' not found", n)) - } - } - - return -} diff --git a/packer/template_test.go b/packer/template_test.go deleted file mode 100644 index 2d0949376..000000000 --- a/packer/template_test.go +++ /dev/null @@ -1,1914 +0,0 @@ -package packer - -import ( - "io/ioutil" - "os" - "reflect" - "sort" - "testing" - "time" -) - -func testTemplateComponentFinder() *ComponentFinder { - builder := new(MockBuilder) - pp := new(MockPostProcessor) - provisioner := &MockProvisioner{} - - builderMap := map[string]Builder{ - "test-builder": builder, - } - - ppMap := map[string]PostProcessor{ - "test-pp": pp, - } - - provisionerMap := map[string]Provisioner{ - "test-prov": provisioner, - } - - builderFactory := func(n string) (Builder, error) { return builderMap[n], nil } - ppFactory := func(n string) (PostProcessor, error) { return ppMap[n], nil } - provFactory := func(n string) (Provisioner, error) { return provisionerMap[n], nil } - return &ComponentFinder{ - Builder: builderFactory, - PostProcessor: ppFactory, - Provisioner: provFactory, - } -} - -func TestParseTemplateFile_basic(t *testing.T) { - data := ` - { - "builders": [{"type": "something"}] - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - tf.Write([]byte(data)) - tf.Close() - - result, err := ParseTemplateFile(tf.Name(), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(result.Builders) != 1 { - t.Fatalf("bad: %#v", result.Builders) - } - - if string(result.RawContents) != data { - t.Fatalf("expected %q to be %q", result.RawContents, data) - } -} - -func TestParseTemplateFile_minPackerVersionBad(t *testing.T) { - data := ` - { - "min_packer_version": "27.0.0", - "builders": [{"type": "something"}] - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - tf.Write([]byte(data)) - tf.Close() - - _, err = ParseTemplateFile(tf.Name(), nil) - if err == nil { - t.Fatal("expects error") - } -} - -func TestParseTemplateFile_minPackerVersionFormat(t *testing.T) { - data := ` - { - "min_packer_version": "NOPE NOPE NOPE", - "builders": [{"type": "something"}] - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - tf.Write([]byte(data)) - tf.Close() - - _, err = ParseTemplateFile(tf.Name(), nil) - if err == nil { - t.Fatal("expects error") - } -} - -func TestParseTemplateFile_minPackerVersionGood(t *testing.T) { - data := ` - { - "min_packer_version": "0.1", - "builders": [{"type": "something"}] - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - tf.Write([]byte(data)) - tf.Close() - - _, err = ParseTemplateFile(tf.Name(), nil) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestParseTemplateFile_stdin(t *testing.T) { - data := ` - { - "builders": [{"type": "something"}] - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - defer tf.Close() - tf.Write([]byte(data)) - - // Sync and seek to the 
beginning so that we can re-read the contents - tf.Sync() - tf.Seek(0, 0) - - // Set stdin to something we control - oldStdin := os.Stdin - defer func() { os.Stdin = oldStdin }() - os.Stdin = tf - - result, err := ParseTemplateFile("-", nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(result.Builders) != 1 { - t.Fatalf("bad: %#v", result.Builders) - } -} - -func TestParseTemplate_Basic(t *testing.T) { - data := ` - { - "builders": [{"type": "something"}] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if len(result.Builders) != 1 { - t.Fatalf("bad: %#v", result.Builders) - } -} - -func TestParseTemplate_Description(t *testing.T) { - data := ` - { - "description": "Foo", - "builders": [{"type": "something"}] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if result.Description != "Foo" { - t.Fatalf("bad: %#v", result.Description) - } -} - -func TestParseTemplate_Invalid(t *testing.T) { - // Note there is an extra comma below for a purposeful - // syntax error in the JSON. - data := ` - { - "builders": [], - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("shold have error") - } - if result != nil { - t.Fatal("should not have result") - } -} - -func TestParseTemplate_InvalidKeys(t *testing.T) { - // Note there is an extra comma below for a purposeful - // syntax error in the JSON. - data := ` - { - "builders": [{"type": "foo"}], - "what is this": "" - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } - if result != nil { - t.Fatal("should not have result") - } -} - -func TestParseTemplate_BuilderWithoutType(t *testing.T) { - data := ` - { - "builders": [{}] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestParseTemplate_BuilderWithNonStringType(t *testing.T) { - data := ` - { - "builders": [{ - "type": 42 - }] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestParseTemplate_BuilderWithoutName(t *testing.T) { - data := ` - { - "builders": [ - { - "type": "amazon-ebs" - } - ] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if len(result.Builders) != 1 { - t.Fatalf("bad: %#v", result.Builders) - } - - builder, ok := result.Builders["amazon-ebs"] - if !ok { - t.Fatal("should be ok") - } - if builder.Type != "amazon-ebs" { - t.Fatalf("bad: %#v", builder.Type) - } -} - -func TestParseTemplate_BuilderWithName(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "bob", - "type": "amazon-ebs" - } - ] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if len(result.Builders) != 1 { - t.Fatalf("bad: %#v", result.Builders) - } - - builder, ok := result.Builders["bob"] - if !ok { - t.Fatal("should be ok") - } - if builder.Type != "amazon-ebs" { - t.Fatalf("bad: %#v", builder.Type) - } - - RawConfig := builder.RawConfig - if RawConfig == nil { - t.Fatal("missing builder raw config") - } - - expected := map[string]interface{}{ - "type": "amazon-ebs", - } - - if 
!reflect.DeepEqual(RawConfig, expected) { - t.Fatalf("bad raw: %#v", RawConfig) - } -} - -func TestParseTemplate_BuilderWithConflictingName(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "bob", - "type": "amazon-ebs" - }, - { - "name": "bob", - "type": "foo", - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestParseTemplate_Hooks(t *testing.T) { - data := ` - { - - "builders": [{"type": "foo"}], - - "hooks": { - "event": ["foo", "bar"] - } - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if len(result.Hooks) != 1 { - t.Fatalf("bad: %#v", result.Hooks) - } - - hooks, ok := result.Hooks["event"] - if !ok { - t.Fatal("should be okay") - } - if !reflect.DeepEqual(hooks, []string{"foo", "bar"}) { - t.Fatalf("bad: %#v", hooks) - } -} - -func TestParseTemplate_PostProcessors(t *testing.T) { - data := ` - { - "builders": [{"type": "foo"}], - - "post-processors": [ - "simple", - - { "type": "detailed" }, - - [ "foo", { "type": "bar" } ] - ] - } - ` - - tpl, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("error parsing: %s", err) - } - - if len(tpl.PostProcessors) != 3 { - t.Fatalf("bad number of post-processors: %d", len(tpl.PostProcessors)) - } - - pp := tpl.PostProcessors[0] - if len(pp) != 1 { - t.Fatalf("wrong number of configs in simple: %d", len(pp)) - } - - if pp[0].Type != "simple" { - t.Fatalf("wrong type for simple: %s", pp[0].Type) - } - - pp = tpl.PostProcessors[1] - if len(pp) != 1 { - t.Fatalf("wrong number of configs in detailed: %d", len(pp)) - } - - if pp[0].Type != "detailed" { - t.Fatalf("wrong type for detailed: %s", pp[0].Type) - } - - pp = tpl.PostProcessors[2] - if len(pp) != 2 { - t.Fatalf("wrong number of configs for sequence: %d", len(pp)) - } - - if pp[0].Type != "foo" { - t.Fatalf("wrong type for sequence 0: %s", pp[0].Type) - } - - if pp[1].Type != "bar" { - t.Fatalf("wrong type for sequence 1: %s", pp[1].Type) - } -} - -func TestParseTemplate_ProvisionerWithoutType(t *testing.T) { - data := ` - { - "builders": [{"type": "foo"}], - - "provisioners": [{}] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("err should not be nil") - } -} - -func TestParseTemplate_ProvisionerWithNonStringType(t *testing.T) { - data := ` - { - "builders": [{"type": "foo"}], - - "provisioners": [{ - "type": 42 - }] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestParseTemplate_Provisioners(t *testing.T) { - data := ` - { - "builders": [{"type": "foo"}], - - "provisioners": [ - { - "type": "shell" - } - ] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - t.Fatal("should have result") - } - if len(result.Provisioners) != 1 { - t.Fatalf("bad: %#v", result.Provisioners) - } - if result.Provisioners[0].Type != "shell" { - t.Fatalf("bad: %#v", result.Provisioners[0].Type) - } - if result.Provisioners[0].RawConfig == nil { - t.Fatal("should have raw config") - } -} - -func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) { - data := ` - { - "builders": [{"type": "foo"}], - - "provisioners": [ - { - "type": "shell", - "pause_before": "10s" - } - ] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if result == nil { - 
t.Fatal("should have result") - } - if len(result.Provisioners) != 1 { - t.Fatalf("bad: %#v", result.Provisioners) - } - if result.Provisioners[0].Type != "shell" { - t.Fatalf("bad: %#v", result.Provisioners[0].Type) - } - if result.Provisioners[0].pauseBefore != 10*time.Second { - t.Fatalf("bad: %s", result.Provisioners[0].pauseBefore) - } -} - -func TestParseTemplateFile_push(t *testing.T) { - data := ` - { - "builders": [{"type": "something"}], - - "push": { - "name": "hello", - "include": ["one"], - "exclude": ["two"] - } - } - ` - - tf, err := ioutil.TempFile("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - tf.Write([]byte(data)) - tf.Close() - - result, err := ParseTemplateFile(tf.Name(), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &PushConfig{ - Name: "hello", - Include: []string{"one"}, - Exclude: []string{"two"}, - } - if !reflect.DeepEqual(result.Push, expected) { - t.Fatalf("bad: %#v", result.Push) - } -} - -func TestParseTemplate_Variables(t *testing.T) { - data := ` - { - "variables": { - "foo": "bar", - "bar": null, - "baz": 27 - }, - - "builders": [{"type": "something"}] - } - ` - - result, err := ParseTemplate([]byte(data), map[string]string{ - "bar": "bar", - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result.Variables == nil || len(result.Variables) != 3 { - t.Fatalf("bad vars: %#v", result.Variables) - } - - if result.Variables["foo"].Default != "bar" { - t.Fatal("foo default is not right") - } - if result.Variables["foo"].Required { - t.Fatal("foo should not be required") - } - if result.Variables["foo"].HasValue { - t.Fatal("foo should not have value") - } - - if result.Variables["bar"].Default != "" { - t.Fatal("default should be empty") - } - if !result.Variables["bar"].Required { - t.Fatal("bar should be required") - } - if !result.Variables["bar"].HasValue { - t.Fatal("bar should have value") - } - if result.Variables["bar"].Value != "bar" { - t.Fatal("bad value") - } - - if result.Variables["baz"].Default != "27" { - t.Fatal("default should be empty") - } - - if result.Variables["baz"].Required { - t.Fatal("baz should not be required") - } -} - -func TestParseTemplate_variablesSet(t *testing.T) { - data := ` - { - "variables": { - "foo": "bar" - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{ - "foo": "value", - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(template.Variables) != 1 { - t.Fatalf("bad vars: %#v", template.Variables) - } - if template.Variables["foo"].Value != "value" { - t.Fatalf("bad: %#v", template.Variables["foo"]) - } -} - -func TestParseTemplate_variablesSetUnknown(t *testing.T) { - data := ` - { - "variables": { - "foo": "bar" - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), map[string]string{ - "what": "value", - }) - if err == nil { - t.Fatal("should error") - } -} - -func TestParseTemplate_variablesBadDefault(t *testing.T) { - data := ` - { - "variables": { - "foo": 7, - }, - - "builders": [{"type": "something"}] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplate_BuildNames(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "bob", - "type": "amazon-ebs" - }, - { - "name": "chris", - "type": "another" - } - ] - } - ` - - result, err := ParseTemplate([]byte(data), nil) - if err != nil { - 
t.Fatalf("err: %s", err) - } - - buildNames := result.BuildNames() - sort.Strings(buildNames) - if !reflect.DeepEqual(buildNames, []string{"bob", "chris"}) { - t.Fatalf("bad: %#v", buildNames) - } -} - -func TestTemplate_BuildUnknown(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("bad: %s", err) - } - - build, err := template.Build("nope", nil) - if build != nil { - t.Fatalf("build should be nil: %#v", build) - } - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplate_BuildUnknownBuilder(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - builderFactory := func(string) (Builder, error) { return nil, nil } - components := &ComponentFinder{Builder: builderFactory} - build, err := template.Build("test1", components) - if err == nil { - t.Fatal("should have error") - } - if build != nil { - t.Fatalf("bad: %#v", build) - } -} - -func TestTemplateBuild_envInVars(t *testing.T) { - data := ` - { - "variables": { - "foo": "{{env \"foo\"}}" - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - defer os.Setenv("foo", os.Getenv("foo")) - if err := os.Setenv("foo", "bar"); err != nil { - t.Fatalf("err: %s", err) - } - - template, err := ParseTemplate([]byte(data), map[string]string{}) - if err != nil { - t.Fatalf("err: %s", err) - } - - b, err := template.Build("test1", testComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - coreBuild, ok := b.(*coreBuild) - if !ok { - t.Fatal("should be ok") - } - - if coreBuild.variables["foo"] != "bar" { - t.Fatalf("bad: %#v", coreBuild.variables) - } -} - -func TestTemplateBuild_names(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2-{{user \"foo\"}}", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - b, err := template.Build("test1", testComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - if b.Name() != "test1" { - t.Fatalf("bad: %#v", b.Name()) - } - - b, err = template.Build("test2-{{user \"foo\"}}", testComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - if b.Name() != "test2-bar" { - t.Fatalf("bad: %#v", b.Name()) - } -} - -func TestTemplate_Build_NilBuilderFunc(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - defer func() { - p := recover() - if p == nil { - t.Fatal("should panic") - } - - if p.(string) != "no builder function" { - t.Fatalf("bad panic: %s", p.(string)) - } - }() - - template.Build("test1", &ComponentFinder{}) -} - -func TestTemplate_Build_NilProvisionerFunc(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - defer func() { - p := recover() - if p 
== nil { - t.Fatal("should panic") - } - - if p.(string) != "no provisioner function" { - t.Fatalf("bad panic: %s", p.(string)) - } - }() - - template.Build("test1", &ComponentFinder{ - Builder: func(string) (Builder, error) { return nil, nil }, - }) -} - -func TestTemplate_Build_NilProvisionerFunc_WithNoProvisioners(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - template.Build("test1", &ComponentFinder{ - Builder: func(string) (Builder, error) { return nil, nil }, - }) -} - -func TestTemplate_Build(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov" - } - ], - - "post-processors": [ - "simple", - [ - "simple", - { "type": "simple", "keep_input_artifact": true } - ] - ] - } - ` - - expectedConfig := map[string]interface{}{ - "type": "test-builder", - } - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - builder := new(MockBuilder) - builderMap := map[string]Builder{ - "test-builder": builder, - } - - provisioner := &MockProvisioner{} - provisionerMap := map[string]Provisioner{ - "test-prov": provisioner, - } - - pp := new(MockPostProcessor) - ppMap := map[string]PostProcessor{ - "simple": pp, - } - - builderFactory := func(n string) (Builder, error) { return builderMap[n], nil } - ppFactory := func(n string) (PostProcessor, error) { return ppMap[n], nil } - provFactory := func(n string) (Provisioner, error) { return provisionerMap[n], nil } - components := &ComponentFinder{ - Builder: builderFactory, - PostProcessor: ppFactory, - Provisioner: provFactory, - } - - // Get the build, verifying we can get it without issue, but also - // that the proper builder was looked up and used for the build. 
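-	// The assertions that follow also check that the post-processor
-	// block flattens as expected: the lone "simple" entry becomes a
-	// single-element sequence, and the two-element sequence honors
-	// keep_input_artifact per entry.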
- build, err := template.Build("test1", components) - if err != nil { - t.Fatalf("err: %s", err) - } - - coreBuild, ok := build.(*coreBuild) - if !ok { - t.Fatal("should be ok") - } - if coreBuild.builder != builder { - t.Fatalf("bad: %#v", coreBuild.builder) - } - if !reflect.DeepEqual(coreBuild.builderConfig, expectedConfig) { - t.Fatalf("bad: %#v", coreBuild.builderConfig) - } - if len(coreBuild.provisioners) != 1 { - t.Fatalf("bad: %#v", coreBuild.provisioners) - } - if len(coreBuild.postProcessors) != 2 { - t.Fatalf("bad: %#v", coreBuild.postProcessors) - } - - if len(coreBuild.postProcessors[0]) != 1 { - t.Fatalf("bad: %#v", coreBuild.postProcessors[0]) - } - if len(coreBuild.postProcessors[1]) != 2 { - t.Fatalf("bad: %#v", coreBuild.postProcessors[1]) - } - - if coreBuild.postProcessors[1][0].keepInputArtifact { - t.Fatal("postProcessors[1][0] should not keep input artifact") - } - if !coreBuild.postProcessors[1][1].keepInputArtifact { - t.Fatal("postProcessors[1][1] should keep input artifact") - } - - config := coreBuild.postProcessors[1][1].config - if _, ok := config["keep_input_artifact"]; ok { - t.Fatal("should not have keep_input_artifact") - } -} - -func TestTemplateBuild_exceptOnlyPP(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "except": ["test1"], - "only": ["test1"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_exceptOnlyProv(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "except": ["test1"], - "only": ["test1"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_exceptPPInvalid(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "except": ["test5"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_exceptPP(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "except": ["test1"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no post-processors - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.postProcessors) > 0 { - t.Fatal("should have no postProcessors") - } - - // Verify test2 has one post-processors - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.postProcessors) != 1 { - t.Fatalf("invalid: %d", len(cbuild.postProcessors)) - } -} - -func TestTemplateBuild_exceptPPConfigTemplateName(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1-{{user \"foo\"}}", - "type": "test-builder" 
- }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "except": ["test1-{{user \"foo\"}}"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no post-processors - build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.postProcessors) > 0 { - t.Fatal("should have no postProcessors") - } - - // Verify test2 has one post-processors - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.postProcessors) != 1 { - t.Fatalf("invalid: %d", len(cbuild.postProcessors)) - } -} - -func TestTemplateBuild_exceptProvInvalid(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "except": ["test5"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_exceptProv(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "except": ["test1"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no provisioners - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.provisioners) > 0 { - t.Fatal("should have no provisioners") - } - - // Verify test2 has one provisioners - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.provisioners) != 1 { - t.Fatalf("invalid: %d", len(cbuild.provisioners)) - } -} - -func TestTemplateBuild_exceptProvConfigTemplateName(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1-{{user \"foo\"}}", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "except": ["test1-{{user \"foo\"}}"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no provisioners - build, err := template.Build("test1-{{user \"foo\"}}", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.provisioners) > 0 { - t.Fatal("should have no provisioners") - } - - // Verify test2 has one provisioners - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.provisioners) != 1 { - t.Fatalf("invalid: %d", len(cbuild.provisioners)) - } -} - -func TestTemplateBuild_onlyPPInvalid(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": 
"test-pp", - "only": ["test5"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_onlyPP(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "only": ["test2"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no post-processors - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.postProcessors) > 0 { - t.Fatal("should have no postProcessors") - } - - // Verify test2 has one post-processors - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.postProcessors) != 1 { - t.Fatalf("invalid: %d", len(cbuild.postProcessors)) - } -} - -func TestTemplateBuild_onlyPPConfigTemplateName(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2-{{user \"foo\"}}", - "type": "test-builder" - } - ], - - "post-processors": [ - { - "type": "test-pp", - "only": ["test2-{{user \"foo\"}}"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no post-processors - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.postProcessors) > 0 { - t.Fatal("should have no postProcessors") - } - - // Verify test2 has one post-processors - build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.postProcessors) != 1 { - t.Fatalf("invalid: %d", len(cbuild.postProcessors)) - } -} - -func TestTemplateBuild_onlyProvInvalid(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "only": ["test5"] - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_onlyProv(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "only": ["test2"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no provisioners - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.provisioners) > 0 { - t.Fatal("should have no provisioners") - } - - // Verify test2 has one provisioners - build, err = template.Build("test2", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.provisioners) != 1 { - t.Fatalf("invalid: %d", len(cbuild.provisioners)) - } -} - -func 
TestTemplateBuild_onlyProvConfigTemplateName(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - }, - { - "name": "test2-{{user \"foo\"}}", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "only": ["test2-{{user \"foo\"}}"] - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{"foo": "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Verify test1 has no provisioners - build, err := template.Build("test1", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild := build.(*coreBuild) - if len(cbuild.provisioners) > 0 { - t.Fatal("should have no provisioners") - } - - // Verify test2 has one provisioners - build, err = template.Build("test2-{{user \"foo\"}}", testTemplateComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - cbuild = build.(*coreBuild) - if len(cbuild.provisioners) != 1 { - t.Fatalf("invalid: %d", len(cbuild.provisioners)) - } -} - -func TestTemplate_Build_ProvisionerOverride(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - - "override": { - "test1": {} - } - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - RawConfig := template.Provisioners[0].RawConfig - if RawConfig == nil { - t.Fatal("missing provisioner raw config") - } - - expected := map[string]interface{}{ - "type": "test-prov", - } - - if !reflect.DeepEqual(RawConfig, expected) { - t.Fatalf("bad raw: %#v", RawConfig) - } - - builder := new(MockBuilder) - builderMap := map[string]Builder{ - "test-builder": builder, - } - - provisioner := &MockProvisioner{} - provisionerMap := map[string]Provisioner{ - "test-prov": provisioner, - } - - builderFactory := func(n string) (Builder, error) { return builderMap[n], nil } - provFactory := func(n string) (Provisioner, error) { return provisionerMap[n], nil } - components := &ComponentFinder{ - Builder: builderFactory, - Provisioner: provFactory, - } - - // Get the build, verifying we can get it without issue, but also - // that the proper builder was looked up and used for the build. 
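-	// With the "test1" override merged in, the provisioner's config
-	// below should carry two elements: the base config plus the
-	// override map.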
- build, err := template.Build("test1", components) - if err != nil { - t.Fatalf("err: %s", err) - } - - coreBuild, ok := build.(*coreBuild) - if !ok { - t.Fatal("should be okay") - } - if len(coreBuild.provisioners) != 1 { - t.Fatalf("bad: %#v", coreBuild.provisioners) - } - if len(coreBuild.provisioners[0].config) != 2 { - t.Fatalf("bad: %#v", coreBuild.provisioners[0].config) - } -} - -func TestTemplate_Build_ProvisionerOverrideBad(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - - "override": { - "testNope": {} - } - } - ] - } - ` - - _, err := ParseTemplate([]byte(data), nil) - if err == nil { - t.Fatal("should have error") - } -} - -func TestTemplateBuild_ProvisionerPauseBefore(t *testing.T) { - data := ` - { - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ], - - "provisioners": [ - { - "type": "test-prov", - "pause_before": "5s" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - builder := new(MockBuilder) - builderMap := map[string]Builder{ - "test-builder": builder, - } - - provisioner := &MockProvisioner{} - provisionerMap := map[string]Provisioner{ - "test-prov": provisioner, - } - - builderFactory := func(n string) (Builder, error) { return builderMap[n], nil } - provFactory := func(n string) (Provisioner, error) { return provisionerMap[n], nil } - components := &ComponentFinder{ - Builder: builderFactory, - Provisioner: provFactory, - } - - // Get the build, verifying we can get it without issue, but also - // that the proper builder was looked up and used for the build. - build, err := template.Build("test1", components) - if err != nil { - t.Fatalf("err: %s", err) - } - - coreBuild, ok := build.(*coreBuild) - if !ok { - t.Fatal("should be okay") - } - if len(coreBuild.provisioners) != 1 { - t.Fatalf("bad: %#v", coreBuild.provisioners) - } - if pp, ok := coreBuild.provisioners[0].provisioner.(*PausedProvisioner); !ok { - t.Fatalf("should be paused provisioner") - } else { - if pp.PauseBefore != 5*time.Second { - t.Fatalf("bad: %#v", pp.PauseBefore) - } - } - - config := coreBuild.provisioners[0].config[0].(map[string]interface{}) - if _, ok := config["pause_before"]; ok { - t.Fatal("pause_before should be removed") - } -} - -func TestTemplateBuild_variables(t *testing.T) { - data := ` - { - "variables": { - "foo": "bar" - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - build, err := template.Build("test1", testComponentFinder()) - if err != nil { - t.Fatalf("err: %s", err) - } - - coreBuild, ok := build.(*coreBuild) - if !ok { - t.Fatalf("couldn't convert!") - } - - expected := map[string]string{"foo": "bar"} - if !reflect.DeepEqual(coreBuild.variables, expected) { - t.Fatalf("bad vars: %#v", coreBuild.variables) - } -} - -func TestTemplateBuild_variablesRequiredNotSet(t *testing.T) { - data := ` - { - "variables": { - "foo": null - }, - - "builders": [ - { - "name": "test1", - "type": "test-builder" - } - ] - } - ` - - template, err := ParseTemplate([]byte(data), map[string]string{}) - if err != nil { - t.Fatalf("err: %s", err) - } - - _, err = template.Build("test1", testComponentFinder()) - if err == nil { - t.Fatal("should error") - } -} From b9eea82a36cda8c90ef58b131cc7599b62e11c64 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto 
Date: Tue, 26 May 2015 09:41:42 -0700 Subject: [PATCH 106/956] template: add tests for RawContents --- template/parse_test.go | 14 ++++++++++++++ template/test-fixtures/parse-contents.json | 1 + 2 files changed, 15 insertions(+) create mode 100644 template/test-fixtures/parse-contents.json diff --git a/template/parse_test.go b/template/parse_test.go index d5d9fcd8e..e99f35e51 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -2,6 +2,7 @@ package template import ( "reflect" + "strings" "testing" "time" ) @@ -284,3 +285,16 @@ func TestParse(t *testing.T) { } } } + +func TestParse_contents(t *testing.T) { + tpl, err := ParseFile(fixtureDir("parse-contents.json")) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(string(tpl.RawContents)) + expected := `{"builders":[{"type":"test"}]}` + if actual != expected { + t.Fatalf("bad: %s\n\n%s", actual, expected) + } +} diff --git a/template/test-fixtures/parse-contents.json b/template/test-fixtures/parse-contents.json new file mode 100644 index 000000000..edd70c12a --- /dev/null +++ b/template/test-fixtures/parse-contents.json @@ -0,0 +1 @@ +{"builders":[{"type":"test"}]} From 53e77eaceab1a1e4dfec7e764b7ec42f1eb1c673 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:46:04 -0700 Subject: [PATCH 107/956] packer: overrides work --- packer/core.go | 7 ++- packer/core_test.go | 47 +++++++++++++++++++ packer/test-fixtures/build-prov-override.json | 14 ++++++ 3 files changed, 66 insertions(+), 2 deletions(-) create mode 100644 packer/test-fixtures/build-prov-override.json diff --git a/packer/core.go b/packer/core.go index a372c7ee8..3969da9c9 100644 --- a/packer/core.go +++ b/packer/core.go @@ -144,8 +144,11 @@ func (c *Core) Build(n string) (Build, error) { // Get the configuration config := make([]interface{}, 1, 2) config[0] = rawP.Config - - // TODO override + if rawP.Override != nil { + if override, ok := rawP.Override[rawName]; ok { + config = append(config, override) + } + } // If we're pausing, we wrap the provisioner in a special pauser. 
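 	// (The pauser waits for PauseBefore and then hands off to the real
 	// provisioner, so the config assembled above, including any
 	// override, is passed through unchanged.)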
if rawP.PauseBefore > 0 { diff --git a/packer/core_test.go b/packer/core_test.go index 712694766..8cec16bae 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -222,6 +222,53 @@ func TestCoreBuild_provSkipInclude(t *testing.T) { } } +func TestCoreBuild_provOverride(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-prov-override.json")) + b := TestBuilder(t, config, "test") + p := TestProvisioner(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + artifact, err := build.Run(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(artifact) != 1 { + t.Fatalf("bad: %#v", artifact) + } + + if artifact[0].Id() != b.ArtifactId { + t.Fatalf("bad: %s", artifact[0].Id()) + } + if !p.ProvCalled { + t.Fatal("provisioner not called") + } + + found := false + for _, raw := range p.PrepConfigs { + if m, ok := raw.(map[string]interface{}); ok { + if _, ok := m["foo"]; ok { + found = true + break + } + } + } + if !found { + t.Fatal("override not called") + } +} + func TestCoreBuild_postProcess(t *testing.T) { config := TestCoreConfig(t) testCoreTemplate(t, config, fixtureDir("build-pp.json")) diff --git a/packer/test-fixtures/build-prov-override.json b/packer/test-fixtures/build-prov-override.json new file mode 100644 index 000000000..eb3554792 --- /dev/null +++ b/packer/test-fixtures/build-prov-override.json @@ -0,0 +1,14 @@ +{ + "builders": [{ + "type": "test" + }], + + "provisioners": [{ + "type": "test", + "override": { + "test": { + "foo": "bar" + } + } + }] +} From d4b489a9ec60b825ee7fb47fd14c2db0eeedb561 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:46:11 -0700 Subject: [PATCH 108/956] update todo --- TODO.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/TODO.txt b/TODO.txt index 031ec3ae3..944dc80e3 100644 --- a/TODO.txt +++ b/TODO.txt @@ -1,2 +1 @@ - var-file doesn't work -- prov/post-processors/hooks don't work From dd0a77550041228678b08b59449ca898526d3374 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:51:47 -0700 Subject: [PATCH 109/956] common/command: delete --- common/command/build_flags.go | 39 ---- common/command/build_flags_test.go | 104 ----------- common/command/flag_slice_value.go | 34 ---- common/command/flag_slice_value_test.go | 54 ------ common/command/template.go | 162 ----------------- common/command/template_test.go | 228 ------------------------ 6 files changed, 621 deletions(-) delete mode 100644 common/command/build_flags.go delete mode 100644 common/command/build_flags_test.go delete mode 100644 common/command/flag_slice_value.go delete mode 100644 common/command/flag_slice_value_test.go delete mode 100644 common/command/template.go delete mode 100644 common/command/template_test.go diff --git a/common/command/build_flags.go b/common/command/build_flags.go deleted file mode 100644 index d08ca58b8..000000000 --- a/common/command/build_flags.go +++ /dev/null @@ -1,39 +0,0 @@ -package command - -import ( - "flag" - "fmt" - "strings" -) - -// BuildOptionFlags sets the proper command line flags needed for -// build options. 
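-// For example:
-//
-//     packer build -only=foo,bar -var "key=value" -var-file=vars.json template.json
-//
-// -except and -only take comma-separated build names, -var takes
-// repeated "key=value" pairs, and -var-file may be repeated to layer
-// JSON variable files.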
-func BuildOptionFlags(fs *flag.FlagSet, f *BuildOptions) { - fs.Var((*SliceValue)(&f.Except), "except", "build all builds except these") - fs.Var((*SliceValue)(&f.Only), "only", "only build the given builds by name") - fs.Var((*userVarValue)(&f.UserVars), "var", "specify a user variable") - fs.Var((*AppendSliceValue)(&f.UserVarFiles), "var-file", "file with user variables") -} - -// userVarValue is a flag.Value that parses out user variables in -// the form of 'key=value' and sets it on this map. -type userVarValue map[string]string - -func (v *userVarValue) String() string { - return "" -} - -func (v *userVarValue) Set(raw string) error { - idx := strings.Index(raw, "=") - if idx == -1 { - return fmt.Errorf("No '=' value in arg: %s", raw) - } - - if *v == nil { - *v = make(map[string]string) - } - - key, value := raw[0:idx], raw[idx+1:] - (*v)[key] = value - return nil -} diff --git a/common/command/build_flags_test.go b/common/command/build_flags_test.go deleted file mode 100644 index 5d39eb946..000000000 --- a/common/command/build_flags_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package command - -import ( - "flag" - "reflect" - "testing" -) - -func TestBuildOptionFlags(t *testing.T) { - opts := new(BuildOptions) - fs := flag.NewFlagSet("test", flag.ContinueOnError) - BuildOptionFlags(fs, opts) - - args := []string{ - "-except=foo,bar,baz", - "-only=a,b", - "-var=foo=bar", - "-var", "bar=baz", - "-var=foo=bang", - "-var-file=foo", - "-var-file=bar", - } - - err := fs.Parse(args) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := []string{"foo", "bar", "baz"} - if !reflect.DeepEqual(opts.Except, expected) { - t.Fatalf("bad: %#v", opts.Except) - } - - expected = []string{"a", "b"} - if !reflect.DeepEqual(opts.Only, expected) { - t.Fatalf("bad: %#v", opts.Only) - } - - if len(opts.UserVars) != 2 { - t.Fatalf("bad: %#v", opts.UserVars) - } - - if opts.UserVars["foo"] != "bang" { - t.Fatalf("bad: %#v", opts.UserVars) - } - - if opts.UserVars["bar"] != "baz" { - t.Fatalf("bad: %#v", opts.UserVars) - } - - expected = []string{"foo", "bar"} - if !reflect.DeepEqual(opts.UserVarFiles, expected) { - t.Fatalf("bad: %#v", opts.UserVarFiles) - } -} - -func TestUserVarValue_implements(t *testing.T) { - var raw interface{} - raw = new(userVarValue) - if _, ok := raw.(flag.Value); !ok { - t.Fatalf("userVarValue should be a Value") - } -} - -func TestUserVarValueSet(t *testing.T) { - sv := new(userVarValue) - err := sv.Set("key=value") - if err != nil { - t.Fatalf("err: %s", err) - } - - vars := map[string]string(*sv) - if vars["key"] != "value" { - t.Fatalf("Bad: %#v", vars) - } - - // Empty value - err = sv.Set("key=") - if err != nil { - t.Fatalf("err: %s", err) - } - - vars = map[string]string(*sv) - if vars["key"] != "" { - t.Fatalf("Bad: %#v", vars) - } - - // Equal in value - err = sv.Set("key=foo=bar") - if err != nil { - t.Fatalf("err: %s", err) - } - - vars = map[string]string(*sv) - if vars["key"] != "foo=bar" { - t.Fatalf("Bad: %#v", vars) - } - - // No equal - err = sv.Set("key") - if err == nil { - t.Fatal("should have error") - } -} diff --git a/common/command/flag_slice_value.go b/common/command/flag_slice_value.go deleted file mode 100644 index 8989dedad..000000000 --- a/common/command/flag_slice_value.go +++ /dev/null @@ -1,34 +0,0 @@ -package command - -import "strings" - -// AppendSliceValue implements the flag.Value interface and allows multiple -// calls to the same variable to append a list. 
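-// For example, "-var-file=a.json -var-file=b.json" accumulates to
-// ["a.json", "b.json"] rather than the last value overwriting the
-// first.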
-type AppendSliceValue []string - -func (s *AppendSliceValue) String() string { - return strings.Join(*s, ",") -} - -func (s *AppendSliceValue) Set(value string) error { - if *s == nil { - *s = make([]string, 0, 1) - } - - *s = append(*s, value) - return nil -} - -// SliceValue implements the flag.Value interface and allows a list of -// strings to be given on the command line and properly parsed into a slice -// of strings internally. -type SliceValue []string - -func (s *SliceValue) String() string { - return strings.Join(*s, ",") -} - -func (s *SliceValue) Set(value string) error { - *s = strings.Split(value, ",") - return nil -} diff --git a/common/command/flag_slice_value_test.go b/common/command/flag_slice_value_test.go deleted file mode 100644 index ca80c9d9f..000000000 --- a/common/command/flag_slice_value_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package command - -import ( - "flag" - "reflect" - "testing" -) - -func TestAppendSliceValue_implements(t *testing.T) { - var raw interface{} - raw = new(AppendSliceValue) - if _, ok := raw.(flag.Value); !ok { - t.Fatalf("AppendSliceValue should be a Value") - } -} - -func TestAppendSliceValueSet(t *testing.T) { - sv := new(AppendSliceValue) - err := sv.Set("foo") - if err != nil { - t.Fatalf("err: %s", err) - } - - err = sv.Set("bar") - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := []string{"foo", "bar"} - if !reflect.DeepEqual([]string(*sv), expected) { - t.Fatalf("Bad: %#v", sv) - } -} - -func TestSliceValue_implements(t *testing.T) { - var raw interface{} - raw = new(SliceValue) - if _, ok := raw.(flag.Value); !ok { - t.Fatalf("SliceValue should be a Value") - } -} - -func TestSliceValueSet(t *testing.T) { - sv := new(SliceValue) - err := sv.Set("foo,bar,baz") - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := []string{"foo", "bar", "baz"} - if !reflect.DeepEqual([]string(*sv), expected) { - t.Fatalf("Bad: %#v", sv) - } -} diff --git a/common/command/template.go b/common/command/template.go deleted file mode 100644 index 27a42f901..000000000 --- a/common/command/template.go +++ /dev/null @@ -1,162 +0,0 @@ -package command - -import ( - "errors" - "fmt" - jsonutil "github.com/mitchellh/packer/common/json" - "github.com/mitchellh/packer/packer" - "io/ioutil" - "log" - "os" -) - -// BuildOptions is a set of options related to builds that can be set -// from the command line. -type BuildOptions struct { - UserVarFiles []string - UserVars map[string]string - Except []string - Only []string -} - -// Validate validates the options -func (f *BuildOptions) Validate() error { - if len(f.Except) > 0 && len(f.Only) > 0 { - return errors.New("Only one of '-except' or '-only' may be specified.") - } - - if len(f.UserVarFiles) > 0 { - for _, path := range f.UserVarFiles { - if _, err := os.Stat(path); err != nil { - return fmt.Errorf("Cannot access: %s", path) - } - } - } - - return nil -} - -// AllUserVars returns the user variables, compiled from both the -// file paths and the vars on the command line. -func (f *BuildOptions) AllUserVars() (map[string]string, error) { - all := make(map[string]string) - - // Copy in the variables from the files - for _, path := range f.UserVarFiles { - fileVars, err := readFileVars(path) - if err != nil { - return nil, err - } - - for k, v := range fileVars { - all[k] = v - } - } - - // Copy in the command-line vars - for k, v := range f.UserVars { - all[k] = v - } - - return all, nil -} - -// Builds returns the builds out of the given template that pass the -// configured options. 
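-// A name given to -only/-except matches a build if it equals either
-// the raw build name (e.g. `test2-{{user "foo"}}`) or its processed
-// form (e.g. `test2-bar`), so both spellings select the same build.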
-func (f *BuildOptions) Builds(t *packer.Template, cf *packer.ComponentFinder) ([]packer.Build, error) { - buildNames := t.BuildNames() - - // Process the name - tpl, _, err := t.NewConfigTemplate() - if err != nil { - return nil, err - } - - checks := make(map[string][]string) - checks["except"] = f.Except - checks["only"] = f.Only - for t, ns := range checks { - for _, n := range ns { - found := false - for _, actual := range buildNames { - var processed string - processed, err = tpl.Process(actual, nil) - if err != nil { - return nil, err - } - if actual == n || processed == n { - found = true - break - } - } - - if !found { - return nil, fmt.Errorf( - "Unknown build in '%s' flag: %s", t, n) - } - } - } - - builds := make([]packer.Build, 0, len(buildNames)) - for _, buildName := range buildNames { - var processedBuildName string - processedBuildName, err = tpl.Process(buildName, nil) - if err != nil { - return nil, err - } - if len(f.Except) > 0 { - found := false - for _, except := range f.Except { - if buildName == except || processedBuildName == except { - found = true - break - } - } - - if found { - log.Printf("Skipping build '%s' because specified by -except.", processedBuildName) - continue - } - } - - if len(f.Only) > 0 { - found := false - for _, only := range f.Only { - if buildName == only || processedBuildName == only { - found = true - break - } - } - - if !found { - log.Printf("Skipping build '%s' because not specified by -only.", processedBuildName) - continue - } - } - - log.Printf("Creating build: %s", processedBuildName) - build, err := t.Build(buildName, cf) - if err != nil { - return nil, fmt.Errorf("Failed to create build '%s': \n\n%s", buildName, err) - } - - builds = append(builds, build) - } - - return builds, nil -} - -func readFileVars(path string) (map[string]string, error) { - bytes, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - vars := make(map[string]string) - err = jsonutil.Unmarshal(bytes, &vars) - if err != nil { - return nil, err - } - - return vars, nil -} diff --git a/common/command/template_test.go b/common/command/template_test.go deleted file mode 100644 index 419ee7012..000000000 --- a/common/command/template_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package command - -import ( - "github.com/mitchellh/packer/packer" - "testing" -) - -func testTemplate() (*packer.Template, *packer.ComponentFinder) { - tplData := `{ - "variables": { - "foo": null - }, - - "builders": [ - { - "type": "foo" - }, - { - "name": "{{user \"foo\"}}", - "type": "bar" - } - ] - } - ` - - tpl, err := packer.ParseTemplate([]byte(tplData), map[string]string{"foo": "bar"}) - if err != nil { - panic(err) - } - - cf := &packer.ComponentFinder{ - Builder: func(string) (packer.Builder, error) { return new(packer.MockBuilder), nil }, - } - - return tpl, cf -} - -func TestBuildOptionsBuilds(t *testing.T) { - opts := new(BuildOptions) - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 2 { - t.Fatalf("bad: %d", len(bs)) - } -} - -func TestBuildOptionsBuilds_except(t *testing.T) { - opts := new(BuildOptions) - opts.Except = []string{"foo"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "bar" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -//Test to make sure the build name pattern matches -func TestBuildOptionsBuilds_exceptConfigTemplateRaw(t *testing.T) { - opts := new(BuildOptions) 
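-	// Excluding by the raw, unprocessed name should drop the templated
-	// builder and leave only the build named "foo".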
- opts.Except = []string{"{{user \"foo\"}}"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "foo" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -//Test to make sure the processed build name matches -func TestBuildOptionsBuilds_exceptConfigTemplateProcessed(t *testing.T) { - opts := new(BuildOptions) - opts.Except = []string{"bar"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "foo" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -func TestBuildOptionsBuilds_only(t *testing.T) { - opts := new(BuildOptions) - opts.Only = []string{"foo"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "foo" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -//Test to make sure the build name pattern matches -func TestBuildOptionsBuilds_onlyConfigTemplateRaw(t *testing.T) { - opts := new(BuildOptions) - opts.Only = []string{"{{user \"foo\"}}"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "bar" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -//Test to make sure the processed build name matches -func TestBuildOptionsBuilds_onlyConfigTemplateProcessed(t *testing.T) { - opts := new(BuildOptions) - opts.Only = []string{"bar"} - - bs, err := opts.Builds(testTemplate()) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(bs) != 1 { - t.Fatalf("bad: %d", len(bs)) - } - - if bs[0].Name() != "bar" { - t.Fatalf("bad: %s", bs[0].Name()) - } -} - -func TestBuildOptionsBuilds_exceptNonExistent(t *testing.T) { - opts := new(BuildOptions) - opts.Except = []string{"i-dont-exist"} - - _, err := opts.Builds(testTemplate()) - if err == nil { - t.Fatal("err should not be nil") - } -} - -func TestBuildOptionsBuilds_onlyNonExistent(t *testing.T) { - opts := new(BuildOptions) - opts.Only = []string{"i-dont-exist"} - - _, err := opts.Builds(testTemplate()) - if err == nil { - t.Fatal("err should not be nil") - } -} - -func TestBuildOptionsValidate(t *testing.T) { - bf := new(BuildOptions) - - err := bf.Validate() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Both set - bf.Except = make([]string, 1) - bf.Only = make([]string, 1) - err = bf.Validate() - if err == nil { - t.Fatal("should error") - } - - // One set - bf.Except = make([]string, 1) - bf.Only = make([]string, 0) - err = bf.Validate() - if err != nil { - t.Fatalf("err: %s", err) - } - - bf.Except = make([]string, 0) - bf.Only = make([]string, 1) - err = bf.Validate() - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestBuildOptionsValidate_userVarFiles(t *testing.T) { - bf := new(BuildOptions) - - err := bf.Validate() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Non-existent file - bf.UserVarFiles = []string{"ireallyshouldntexistanywhere"} - err = bf.Validate() - if err == nil { - t.Fatal("should error") - } -} From 7f78a2c5d91a5e6f6f596201d5ded5eeab468907 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:58:04 -0700 Subject: [PATCH 110/956] helper/flag-kv: can parse JSON files --- helper/flag-kv/flag_json.go | 34 ++++++++++++++ helper/flag-kv/flag_json_test.go | 59 +++++++++++++++++++++++++ 
helper/flag-kv/test-fixtures/basic.json | 3 ++ 3 files changed, 96 insertions(+) create mode 100644 helper/flag-kv/flag_json.go create mode 100644 helper/flag-kv/flag_json_test.go create mode 100644 helper/flag-kv/test-fixtures/basic.json diff --git a/helper/flag-kv/flag_json.go b/helper/flag-kv/flag_json.go new file mode 100644 index 000000000..9af9fe1da --- /dev/null +++ b/helper/flag-kv/flag_json.go @@ -0,0 +1,34 @@ +package kvflag + +import ( + "encoding/json" + "fmt" + "os" +) + +// FlagJSON is a flag.Value implementation for parsing user variables +// from the command-line using JSON files. +type FlagJSON map[string]string + +func (v *FlagJSON) String() string { + return "" +} + +func (v *FlagJSON) Set(raw string) error { + f, err := os.Open(raw) + if err != nil { + return err + } + defer f.Close() + + if *v == nil { + *v = make(map[string]string) + } + + if err := json.NewDecoder(f).Decode(v); err != nil { + return fmt.Errorf( + "Error reading variables in '%s': %s", raw, err) + } + + return nil +} diff --git a/helper/flag-kv/flag_json_test.go b/helper/flag-kv/flag_json_test.go new file mode 100644 index 000000000..df5a99e64 --- /dev/null +++ b/helper/flag-kv/flag_json_test.go @@ -0,0 +1,59 @@ +package kvflag + +import ( + "flag" + "path/filepath" + "reflect" + "testing" +) + +func TestFlagJSON_impl(t *testing.T) { + var _ flag.Value = new(FlagJSON) +} + +func TestFlagJSON(t *testing.T) { + cases := []struct { + Input string + Initial map[string]string + Output map[string]string + Error bool + }{ + { + "basic.json", + nil, + map[string]string{"key": "value"}, + false, + }, + + { + "basic.json", + map[string]string{"foo": "bar"}, + map[string]string{"foo": "bar", "key": "value"}, + false, + }, + + { + "basic.json", + map[string]string{"key": "bar"}, + map[string]string{"key": "value"}, + false, + }, + } + + for _, tc := range cases { + f := new(FlagJSON) + if tc.Initial != nil { + f = (*FlagJSON)(&tc.Initial) + } + + err := f.Set(filepath.Join("./test-fixtures", tc.Input)) + if (err != nil) != tc.Error { + t.Fatalf("bad error. 
Input: %#v\n\n%s", tc.Input, err) + } + + actual := map[string]string(*f) + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("bad: %#v", actual) + } + } +} diff --git a/helper/flag-kv/test-fixtures/basic.json b/helper/flag-kv/test-fixtures/basic.json new file mode 100644 index 000000000..21da3b262 --- /dev/null +++ b/helper/flag-kv/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "key": "value" +} From 8df1bca5a1f1590e4672b1fda29ad3ba7161fc4d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 09:58:36 -0700 Subject: [PATCH 111/956] command/meta: parse var-files --- TODO.txt | 1 - command/meta.go | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 TODO.txt diff --git a/TODO.txt b/TODO.txt deleted file mode 100644 index 944dc80e3..000000000 --- a/TODO.txt +++ /dev/null @@ -1 +0,0 @@ -- var-file doesn't work diff --git a/command/meta.go b/command/meta.go index e62577df9..0dc721bd1 100644 --- a/command/meta.go +++ b/command/meta.go @@ -32,7 +32,6 @@ type Meta struct { flagBuildExcept []string flagBuildOnly []string flagVars map[string]string - flagVarFiles []string } // Core returns the core for the given template given the configured @@ -122,7 +121,7 @@ func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet { // FlagSetVars tells us what variables to use if fs&FlagSetVars != 0 { f.Var((*kvflag.Flag)(&m.flagVars), "var", "") - f.Var((*sliceflag.StringFlag)(&m.flagVarFiles), "var-file", "") + f.Var((*kvflag.FlagJSON)(&m.flagVars), "var-file", "") } // Create an io.Writer that writes to our Ui properly for errors. From 485825fe12b83a4981fb6f305af4b288eaff34a6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 13:21:13 -0700 Subject: [PATCH 112/956] helper/builder/testing for acceptance tests --- helper/builder/testing/testing.go | 175 +++++++++++++++++++++++++ helper/builder/testing/testing_test.go | 88 +++++++++++++ 2 files changed, 263 insertions(+) create mode 100644 helper/builder/testing/testing.go create mode 100644 helper/builder/testing/testing_test.go diff --git a/helper/builder/testing/testing.go b/helper/builder/testing/testing.go new file mode 100644 index 000000000..bc5cfa236 --- /dev/null +++ b/helper/builder/testing/testing.go @@ -0,0 +1,175 @@ +package testing + +import ( + "fmt" + "log" + "os" + "strings" + "testing" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" +) + +// TestEnvVar must be set to a non-empty value for acceptance tests to run. +const TestEnvVar = "PACKER_ACC" + +// TestCase is a single set of tests to run for a backend. A TestCase +// should generally map 1:1 to each test method for your acceptance +// tests. +type TestCase struct { + // Precheck, if non-nil, will be called once before the test case + // runs at all. This can be used for some validation prior to the + // test running. + PreCheck func() + + // Builder is the Builder that will be tested. It will be available + // as the "test" builder in the template. + Builder packer.Builder + + // Template is a path to a text template. We use a text file + // so we can use the entire machinery to test this builder. + Template string + + // Check is called after this step is executed in order to test that + // the step executed successfully. If this is not set, then the next + // step will be called + Check TestCheckFunc + + // Teardown will be called before the test case is over regardless + // of if the test succeeded or failed. 
+	// error in the case that the test can't guarantee all resources
+	// were properly cleaned up.
+	Teardown TestTeardownFunc
+}
+
+// TestCheckFunc is the callback used for Check in TestCase.
+type TestCheckFunc func([]packer.Artifact) error
+
+// TestTeardownFunc is the callback used for Teardown in TestCase.
+type TestTeardownFunc func() error
+
+// TestT is the interface used to handle the test lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Skip(args ...interface{})
+}
+
+// Test performs an acceptance test on a builder with the given test case.
+//
+// Tests are not run unless the environment variable "PACKER_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// a long time, we require the verbose flag so users are able to see
+// progress output.
+func Test(t TestT, c TestCase) {
+	// We only run acceptance tests if an env var is set because they're
+	// slow and generally require some outside configuration.
+	if os.Getenv(TestEnvVar) == "" {
+		t.Skip(fmt.Sprintf(
+			"Acceptance tests skipped unless env '%s' set",
+			TestEnvVar))
+		return
+	}
+
+	// We require verbose mode so that the user knows what is going on.
+	if !testTesting && !testing.Verbose() {
+		t.Fatal("Acceptance tests must be run with the -v flag on tests")
+		return
+	}
+
+	// Run the PreCheck if we have it
+	if c.PreCheck != nil {
+		c.PreCheck()
+	}
+
+	// Parse the template
+	log.Printf("[DEBUG] Parsing template: %s", c.Template)
+	tpl, err := template.ParseFile(c.Template)
+	if err != nil {
+		t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
+		return
+	}
+
+	// Build the core
+	log.Printf("[DEBUG] Initializing core...")
+	core, err := packer.NewCore(&packer.CoreConfig{
+		Components: packer.ComponentFinder{
+			Builder: func(n string) (packer.Builder, error) {
+				if n == "test" {
+					return c.Builder, nil
+				}
+
+				return nil, nil
+			},
+		},
+		Template: tpl,
+	})
+	if err != nil {
+		t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
+		return
+	}
+
+	// Get the build
+	log.Printf("[DEBUG] Retrieving 'test' build")
+	build, err := core.Build("test")
+	if err != nil {
+		t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err))
+		return
+	}
+
+	// Prepare it
+	log.Printf("[DEBUG] Preparing 'test' build")
+	warnings, err := build.Prepare()
+	if err != nil {
+		t.Fatal(fmt.Sprintf("Prepare error: %s", err))
+		return
+	}
+	if len(warnings) > 0 {
+		t.Fatal(fmt.Sprintf(
+			"Prepare warnings:\n\n%s",
+			strings.Join(warnings, "\n")))
+		return
+	}
+
+	// Run it!
+ log.Printf("[DEBUG] Running 'test' build") + cache := &packer.FileCache{CacheDir: os.TempDir()} + ui := &packer.BasicUi{ + Reader: os.Stdin, + Writer: os.Stdout, + ErrorWriter: os.Stdout, + } + artifacts, err := build.Run(ui, cache) + if err != nil { + t.Fatal(fmt.Sprintf("Run error:\n\n%s", err)) + return + } + + // Check function + if c.Check != nil { + log.Printf("[DEBUG] Running check function") + if err := c.Check(artifacts); err != nil { + t.Fatal(fmt.Sprintf("Check error:\n\n%s", err)) + return + } + } + + // Teardown + if c.Teardown != nil { + log.Printf("[DEBUG] Running teardown function") + if err := c.Teardown(); err != nil { + t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err)) + return + } + } +} + +// This is for unit tests of this package. +var testTesting = false diff --git a/helper/builder/testing/testing_test.go b/helper/builder/testing/testing_test.go new file mode 100644 index 000000000..d0e4dbc60 --- /dev/null +++ b/helper/builder/testing/testing_test.go @@ -0,0 +1,88 @@ +package testing + +import ( + "os" + "testing" +) + +func init() { + testTesting = true + + if err := os.Setenv(TestEnvVar, "1"); err != nil { + panic(err) + } +} + +func TestTest_noEnv(t *testing.T) { + // Unset the variable + if err := os.Setenv(TestEnvVar, ""); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Setenv(TestEnvVar, "1") + + mt := new(mockT) + Test(mt, TestCase{}) + + if !mt.SkipCalled { + t.Fatal("skip not called") + } +} + +func TestTest_preCheck(t *testing.T) { + called := false + + mt := new(mockT) + Test(mt, TestCase{ + PreCheck: func() { called = true }, + }) + + if !called { + t.Fatal("precheck should be called") + } +} + +// mockT implements TestT for testing +type mockT struct { + ErrorCalled bool + ErrorArgs []interface{} + FatalCalled bool + FatalArgs []interface{} + SkipCalled bool + SkipArgs []interface{} + + f bool +} + +func (t *mockT) Error(args ...interface{}) { + t.ErrorCalled = true + t.ErrorArgs = args + t.f = true +} + +func (t *mockT) Fatal(args ...interface{}) { + t.FatalCalled = true + t.FatalArgs = args + t.f = true +} + +func (t *mockT) Skip(args ...interface{}) { + t.SkipCalled = true + t.SkipArgs = args + t.f = true +} + +func (t *mockT) failed() bool { + return t.f +} + +func (t *mockT) failMessage() string { + if t.FatalCalled { + return t.FatalArgs[0].(string) + } else if t.ErrorCalled { + return t.ErrorArgs[0].(string) + } else if t.SkipCalled { + return t.SkipArgs[0].(string) + } + + return "unknown" +} From 0e1fd516c2907e7a94f127c558f77f32dd1409c6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 May 2015 13:26:22 -0700 Subject: [PATCH 113/956] Add testacc to Makefile --- Makefile | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d0d67dab8..d440fd501 100644 --- a/Makefile +++ b/Makefile @@ -10,10 +10,23 @@ bin: dev: @TF_DEV=1 sh -c "$(CURDIR)/scripts/build.sh" +# generate runs `go generate` to build the dynamically generated +# source files. +generate: + go generate ./... + test: go test $(TEST) $(TESTARGS) -timeout=10s @$(MAKE) vet +# testacc runs acceptance tests +testacc: generate + @if [ "$(TEST)" = "./..." 
]; then \
+		echo "ERROR: Set TEST to a specific package"; \
+		exit 1; \
+	fi
+	PACKER_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 45m
+
 testrace:
 	go test -race $(TEST) $(TESTARGS)
 
@@ -30,4 +43,4 @@ vet:
 		echo "and fix them if necessary before submitting the code for review."; \
 	fi
 
-.PHONY: bin default test updatedeps vet
+.PHONY: bin default generate test testacc updatedeps vet

From 49e29d5a6b47a97d7e21a4373a5a38aaa500c1d5 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Tue, 26 May 2015 13:47:17 -0700
Subject: [PATCH 114/956] builder/testing: delete artifacts

---
 helper/builder/testing/testing.go | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/helper/builder/testing/testing.go b/helper/builder/testing/testing.go
index bc5cfa236..29200e108 100644
--- a/helper/builder/testing/testing.go
+++ b/helper/builder/testing/testing.go
@@ -2,6 +2,7 @@ package testing
 
 import (
 	"fmt"
+	"io/ioutil"
 	"log"
 	"os"
 	"strings"
@@ -27,8 +28,7 @@ type TestCase struct {
 	// as the "test" builder in the template.
 	Builder packer.Builder
 
-	// Template is a path to a text template. We use a text file
-	// so we can use the entire machinery to test this builder.
+	// Template is the template contents to use.
 	Template string
 
 	// Check is called after the build completes in order to verify that
@@ -90,8 +90,8 @@ func Test(t TestT, c TestCase) {
 	}
 
 	// Parse the template
-	log.Printf("[DEBUG] Parsing template: %s", c.Template)
-	tpl, err := template.ParseFile(c.Template)
+	log.Printf("[DEBUG] Parsing template...")
+	tpl, err := template.Parse(strings.NewReader(c.Template))
 	if err != nil {
 		t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
 		return
@@ -138,18 +138,19 @@ func Test(t TestT, c TestCase) {
 		return
 	}
 
-	// Run it!
+	// Run it! We use a temporary directory for caching and discard
+	// any UI output. We discard since it shows up in logs anyway.
 	log.Printf("[DEBUG] Running 'test' build")
 	cache := &packer.FileCache{CacheDir: os.TempDir()}
 	ui := &packer.BasicUi{
 		Reader:      os.Stdin,
-		Writer:      os.Stdout,
-		ErrorWriter: os.Stdout,
+		Writer:      ioutil.Discard,
+		ErrorWriter: ioutil.Discard,
 	}
 	artifacts, err := build.Run(ui, cache)
 	if err != nil {
 		t.Fatal(fmt.Sprintf("Run error:\n\n%s", err))
-		return
+		goto TEARDOWN
 	}
 
 	// Check function
@@ -157,7 +158,17 @@ func Test(t TestT, c TestCase) {
 		log.Printf("[DEBUG] Running check function")
 		if err := c.Check(artifacts); err != nil {
 			t.Fatal(fmt.Sprintf("Check error:\n\n%s", err))
-			return
+			goto TEARDOWN
 		}
 	}
 
+TEARDOWN:
+	// Delete all artifacts
+	for _, a := range artifacts {
+		if err := a.Destroy(); err != nil {
+			t.Error(fmt.Sprintf(
+				"!!! ERROR REMOVING ARTIFACT '%s': %s !!!",
+				a.String(), err))
+		}
+	}

From fea8bb3566750c811c853654a7fdb4e306ffcaca Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Tue, 26 May 2015 13:47:24 -0700
Subject: [PATCH 115/956] amazon/ebs: basic acceptance test

---
 builder/amazon/ebs/builder_acc_test.go | 39 ++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 builder/amazon/ebs/builder_acc_test.go

diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go
new file mode 100644
index 000000000..19af43512
--- /dev/null
+++ b/builder/amazon/ebs/builder_acc_test.go
@@ -0,0 +1,39 @@
+package ebs
+
+import (
+	"os"
+	"testing"
+
+	builderT "github.com/mitchellh/packer/helper/builder/testing"
+)
+
+func TestBuilderAcc_basic(t *testing.T) {
+	builderT.Test(t, builderT.TestCase{
+		PreCheck: func() { testAccPreCheck(t) },
+		Builder:  &Builder{},
+		Template: testBuilderAccBasic,
+	})
+}
+
+func testAccPreCheck(t *testing.T) {
+	if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
+		t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
+		t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
+	}
+}
+
+const testBuilderAccBasic = `
+{
+	"builders": [{
+		"type": "test",
+		"region": "us-east-1",
+		"instance_type": "m3.medium",
+		"source_ami": "ami-76b2a71e",
+		"ssh_username": "ubuntu",
+		"ami_name": "packer-test {{timestamp}}"
+	}]
+}
+`

From 4ea821737a7de4ffe21defcf4322caa6937ea048 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Tue, 26 May 2015 13:50:45 -0700
Subject: [PATCH 116/956] update README for acceptance tests

---
 README.md | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/README.md b/README.md
index 46ceb377a..ab069a213 100644
--- a/README.md
+++ b/README.md
@@ -121,3 +121,35 @@ package by specifying the `TEST` variable. For example below, only
     $ make test TEST=./packer
     ...
+
+### Acceptance Tests
+
+Packer has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
+covering the builders of Packer.
+
+If you're working on a feature of a builder or a new builder and want
+to verify it is functioning (and also hasn't broken anything else), we
+recommend running the acceptance tests.
+
+**Warning:** The acceptance tests create/destroy/modify *real resources*, which
+may incur real costs in some cases. In the presence of a bug, it is technically
+possible that broken builders could leave dangling data behind. Therefore,
+please run the acceptance tests at your own risk. At the very least,
+we recommend running them in their own private account for whatever builder
+you're testing.
+
+To run the acceptance tests, invoke `make testacc`:
+
+```sh
+$ make testacc TEST=./builder/amazon/ebs
+...
+```
+
+The `TEST` variable is required, and you should specify the folder where the
+builder is. The `TESTARGS` variable is recommended to filter down to a specific
+test to run, since running all of them at once can sometimes take a very
+long time.
+
+Acceptance tests typically require other environment variables to be set for
+things such as access keys. The test itself should error early and tell
+you what to set, so it is not documented here.
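As a usage sketch of the `testacc` target added above (assuming the test name
and credential variables from the patches in this series), `TESTARGS` simply
passes a standard `go test -run` filter through to the acceptance run:

```sh
# Credentials are read from the environment, as testAccPreCheck requires.
$ export AWS_ACCESS_KEY_ID=...
$ export AWS_SECRET_ACCESS_KEY=...

# Run only TestBuilderAcc_basic in the amazon/ebs builder package.
$ make testacc TEST=./builder/amazon/ebs TESTARGS='-run TestBuilderAcc_basic'
```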
From 9db0cced8ac5d1b37d8076d1c1cb7a9eb0172005 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 26 May 2015 14:01:50 -0700 Subject: [PATCH 117/956] Removed go 1.2 from Travis.ci config --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b0de812ea..d67d52bfa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,6 @@ sudo: false language: go go: - - 1.2 - 1.3 - 1.4 - tip From 728c5cea6e9230ad7458d0c6954d6447b090884e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 26 May 2015 14:12:04 -0700 Subject: [PATCH 118/956] Also deprecate go version 1.3 --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d67d52bfa..00f3361b9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,6 @@ sudo: false language: go go: - - 1.3 - 1.4 - tip From 41a6fe9fda564e4add99254d3f1782a46fd4b2ff Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 10:07:51 -0700 Subject: [PATCH 119/956] template/interpolate: RenderMap to render a complex structure --- template/interpolate/render.go | 273 ++++++++++++++++++++++++++++ template/interpolate/render_test.go | 92 ++++++++++ 2 files changed, 365 insertions(+) create mode 100644 template/interpolate/render.go create mode 100644 template/interpolate/render_test.go diff --git a/template/interpolate/render.go b/template/interpolate/render.go new file mode 100644 index 000000000..0225f762c --- /dev/null +++ b/template/interpolate/render.go @@ -0,0 +1,273 @@ +package interpolate + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" + "github.com/mitchellh/reflectwalk" +) + +// RenderFilter is an option for filtering what gets rendered and +// doesn't within an interface. +type RenderFilter struct { + Include []string + + once sync.Once + includeSet map[string]struct{} +} + +// RenderMap renders all the strings in the given interface. The +// interface must decode into a map[string]interface{}, but is left +// as an interface{} type to ease backwards compatibility with the way +// arguments are passed around in Packer. +func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interface{}, error) { + // First decode it into the map + var m map[string]interface{} + if err := mapstructure.Decode(v, &m); err != nil { + return nil, err + } + + // Now go through each value and render it + for k, raw := range m { + if !f.include(k) { + continue + } + + raw, err := renderInterface(raw, ctx) + if err != nil { + return nil, fmt.Errorf("render '%s': %s", k, err) + } + + m[k] = raw + } + + return m, nil +} + +func renderInterface(v interface{}, ctx *Context) (interface{}, error) { + f := func(v string) (string, error) { + return Render(v, ctx) + } + + walker := &renderWalker{ + F: f, + Replace: true, + } + err := reflectwalk.Walk(v, walker) + if err != nil { + return nil, err + } + + if walker.Top != nil { + v = walker.Top + } + return v, nil +} + +// Include checks whether a key should be included. +func (f *RenderFilter) include(k string) bool { + if f == nil { + return true + } + + f.once.Do(f.init) + _, ok := f.includeSet[k] + return ok +} + +func (f *RenderFilter) init() { + f.includeSet = make(map[string]struct{}) + for _, v := range f.Include { + f.includeSet[v] = struct{}{} + } +} + +// renderWalker implements interfaces for the reflectwalk package +// (github.com/mitchellh/reflectwalk) that can be used to automatically +// execute a callback for an interpolation. 
+type renderWalker struct { + // F is the function to call for every interpolation. It can be nil. + // + // If Replace is true, then the return value of F will be used to + // replace the interpolation. + F renderWalkerFunc + Replace bool + + // ContextF is an advanced version of F that also receives the + // location of where it is in the structure. This lets you do + // context-aware validation. + ContextF renderWalkerContextFunc + + // Top is the top value of the walk. This might get replaced if the + // top value needs to be modified. It is valid to read after any walk. + // If it is nil, it means the top wasn't replaced. + Top interface{} + + key []string + lastValue reflect.Value + loc reflectwalk.Location + cs []reflect.Value + csKey []reflect.Value + csData interface{} + sliceIndex int + unknownKeys []string +} + +// renderWalkerFunc is the callback called by interpolationWalk. +// It is called with any interpolation found. It should return a value +// to replace the interpolation with, along with any errors. +// +// If Replace is set to false in renderWalker, then the replace +// value can be anything as it will have no effect. +type renderWalkerFunc func(string) (string, error) + +// renderWalkerContextFunc is called by interpolationWalk if +// ContextF is set. This receives both the interpolation and the location +// where the interpolation is. +// +// This callback can be used to validate the location of the interpolation +// within the configuration. +type renderWalkerContextFunc func(reflectwalk.Location, string) + +func (w *renderWalker) Enter(loc reflectwalk.Location) error { + w.loc = loc + return nil +} + +func (w *renderWalker) Exit(loc reflectwalk.Location) error { + w.loc = reflectwalk.None + + switch loc { + case reflectwalk.Map: + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + w.key = w.key[:len(w.key)-1] + w.csKey = w.csKey[:len(w.csKey)-1] + case reflectwalk.Slice: + // Split any values that need to be split + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.SliceElem: + w.csKey = w.csKey[:len(w.csKey)-1] + } + + return nil +} + +func (w *renderWalker) Map(m reflect.Value) error { + w.cs = append(w.cs, m) + return nil +} + +func (w *renderWalker) MapElem(m, k, v reflect.Value) error { + w.csData = k + w.csKey = append(w.csKey, k) + w.key = append(w.key, k.String()) + w.lastValue = v + return nil +} + +func (w *renderWalker) Slice(s reflect.Value) error { + w.cs = append(w.cs, s) + return nil +} + +func (w *renderWalker) SliceElem(i int, elem reflect.Value) error { + w.csKey = append(w.csKey, reflect.ValueOf(i)) + w.sliceIndex = i + return nil +} + +func (w *renderWalker) Primitive(v reflect.Value) error { + setV := v + + // We only care about strings + if v.Kind() == reflect.Interface { + setV = v + v = v.Elem() + } + if v.Kind() != reflect.String { + return nil + } + + strV := v.String() + if w.ContextF != nil { + w.ContextF(w.loc, strV) + } + + if w.F == nil { + return nil + } + + replaceVal, err := w.F(strV) + if err != nil { + return fmt.Errorf( + "%s in:\n\n%s", + err, v.String()) + } + + if w.Replace { + resultVal := reflect.ValueOf(replaceVal) + switch w.loc { + case reflectwalk.MapKey: + m := w.cs[len(w.cs)-1] + + // Delete the old value + var zero reflect.Value + m.SetMapIndex(w.csData.(reflect.Value), zero) + + // Set the new key with the existing value + m.SetMapIndex(resultVal, w.lastValue) + + // Set the key to be the new key + w.csData = resultVal + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // 
to set it directly. + m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + case reflectwalk.WalkLoc: + // At the root element, we can't write that, so we just save it + w.Top = resultVal.Interface() + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} + +func (w *renderWalker) removeCurrent() { + // Append the key to the unknown keys + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) + + for i := 1; i <= len(w.cs); i++ { + c := w.cs[len(w.cs)-i] + switch c.Kind() { + case reflect.Map: + // Zero value so that we delete the map key + var val reflect.Value + + // Get the key and delete it + k := w.csData.(reflect.Value) + c.SetMapIndex(k, val) + return + } + } + + panic("No container found for removeCurrent") +} + +func (w *renderWalker) replaceCurrent(v reflect.Value) { + c := w.cs[len(w.cs)-2] + switch c.Kind() { + case reflect.Map: + // Get the key and delete it + k := w.csKey[len(w.csKey)-1] + c.SetMapIndex(k, v) + } +} diff --git a/template/interpolate/render_test.go b/template/interpolate/render_test.go new file mode 100644 index 000000000..f6e466029 --- /dev/null +++ b/template/interpolate/render_test.go @@ -0,0 +1,92 @@ +package interpolate + +import ( + "reflect" + "testing" +) + +func TestRenderMap(t *testing.T) { + cases := map[string]struct { + Input interface{} + Output interface{} + Filter *RenderFilter + }{ + "basic": { + map[string]interface{}{ + "foo": "{{upper `bar`}}", + }, + map[string]interface{}{ + "foo": "BAR", + }, + nil, + }, + + "map keys shouldn't be interpolated": { + map[string]interface{}{ + "{{foo}}": "{{upper `bar`}}", + }, + map[string]interface{}{ + "{{foo}}": "BAR", + }, + nil, + }, + + "nested values": { + map[string]interface{}{ + "foo": map[string]string{ + "bar": "{{upper `baz`}}", + }, + }, + map[string]interface{}{ + "foo": map[string]string{ + "bar": "BAZ", + }, + }, + nil, + }, + + "nested value keys": { + map[string]interface{}{ + "foo": map[string]string{ + "{{upper `bar`}}": "{{upper `baz`}}", + }, + }, + map[string]interface{}{ + "foo": map[string]string{ + "BAR": "BAZ", + }, + }, + nil, + }, + + "filter": { + map[string]interface{}{ + "bar": "{{upper `baz`}}", + "foo": map[string]string{ + "{{upper `bar`}}": "{{upper `baz`}}", + }, + }, + map[string]interface{}{ + "bar": "BAZ", + "foo": map[string]string{ + "{{upper `bar`}}": "{{upper `baz`}}", + }, + }, + &RenderFilter{ + Include: []string{"bar"}, + }, + }, + } + + ctx := &Context{} + for k, tc := range cases { + actual, err := RenderMap(tc.Input, ctx, tc.Filter) + if err != nil { + t.Fatalf("err: %s\n\n%s", k, err) + } + + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("err: %s\n\n%#v\n\n%#v", k, actual, tc.Output) + } + } +} From 9d2e9268081d4d9e517a1a9782da5fa85a024a98 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 10:09:11 -0700 Subject: [PATCH 120/956] template/interpolate: filter is case insensitive --- template/interpolate/render.go | 4 ++-- template/interpolate/render_test.go | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/template/interpolate/render.go b/template/interpolate/render.go index 0225f762c..9fa1c0f7d 100644 --- a/template/interpolate/render.go +++ b/template/interpolate/render.go @@ -74,14 +74,14 @@ func (f *RenderFilter) include(k string) bool { } f.once.Do(f.init) - _, ok := f.includeSet[k] + _, ok := f.includeSet[strings.ToLower(k)] return ok } func (f *RenderFilter) init() { f.includeSet = 
make(map[string]struct{}) for _, v := range f.Include { - f.includeSet[v] = struct{}{} + f.includeSet[strings.ToLower(v)] = struct{}{} } } diff --git a/template/interpolate/render_test.go b/template/interpolate/render_test.go index f6e466029..60a88f6fb 100644 --- a/template/interpolate/render_test.go +++ b/template/interpolate/render_test.go @@ -76,6 +76,24 @@ func TestRenderMap(t *testing.T) { Include: []string{"bar"}, }, }, + + "filter case-insensitive": { + map[string]interface{}{ + "bar": "{{upper `baz`}}", + "foo": map[string]string{ + "{{upper `bar`}}": "{{upper `baz`}}", + }, + }, + map[string]interface{}{ + "bar": "BAZ", + "foo": map[string]string{ + "{{upper `bar`}}": "{{upper `baz`}}", + }, + }, + &RenderFilter{ + Include: []string{"baR"}, + }, + }, } ctx := &Context{} From 71932cccc9c5aaa7b6c16a730615259b486cb6c8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 10:10:58 -0700 Subject: [PATCH 121/956] template/interpolate: export RenderInterface --- template/interpolate/render.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/template/interpolate/render.go b/template/interpolate/render.go index 9fa1c0f7d..a16ae94e9 100644 --- a/template/interpolate/render.go +++ b/template/interpolate/render.go @@ -36,7 +36,7 @@ func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interfa continue } - raw, err := renderInterface(raw, ctx) + raw, err := RenderInterface(raw, ctx) if err != nil { return nil, fmt.Errorf("render '%s': %s", k, err) } @@ -47,7 +47,8 @@ func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interfa return m, nil } -func renderInterface(v interface{}, ctx *Context) (interface{}, error) { +// RenderInterface renders any value and returns the resulting value. +func RenderInterface(v interface{}, ctx *Context) (interface{}, error) { f := func(v string) (string, error) { return Render(v, ctx) } From 241f76b5b1440bbdac3c95b3c6b21bf594b68b6a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 10:44:10 -0700 Subject: [PATCH 122/956] helper/config: decoder --- helper/config/decode.go | 108 +++++++++++++++++++++++++++++++++++ helper/config/decode_test.go | 86 ++++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) create mode 100644 helper/config/decode.go create mode 100644 helper/config/decode_test.go diff --git a/helper/config/decode.go b/helper/config/decode.go new file mode 100644 index 000000000..b9dafc915 --- /dev/null +++ b/helper/config/decode.go @@ -0,0 +1,108 @@ +package config + +import ( + "reflect" + + "github.com/mitchellh/mapstructure" + "github.com/mitchellh/packer/template/interpolate" +) + +// DecodeOpts are the options for decoding configuration. +type DecodeOpts struct { + // Interpolate, if true, will automatically interpolate the + // configuration with the given InterpolateContext. User variables + // will be automatically detected and added in-place to the given + // context. + Interpolate bool + InterpolateContext *interpolate.Context + InterpolateFilter *interpolate.RenderFilter +} + +// Decode decodes the configuration into the target and optionally +// automatically interpolates all the configuration as it goes. +func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { + if config == nil { + config = &DecodeOpts{Interpolate: true} + } + + // Interpolate first + if config.Interpolate { + // Detect user variables from the raws and merge them into our context + ctx, err := DetectContext(raws...) 
+ if err != nil { + return err + } + if config.InterpolateContext == nil { + config.InterpolateContext = ctx + } else { + config.InterpolateContext.UserVariables = ctx.UserVariables + } + ctx = config.InterpolateContext + + // Render everything + for i, raw := range raws { + m, err := interpolate.RenderMap(raw, ctx, config.InterpolateFilter) + if err != nil { + return err + } + + raws[i] = m + } + } + + // Build our decoder + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: target, + Metadata: &md, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + uint8ToStringHook, + mapstructure.StringToSliceHookFunc(","), + ), + }) + if err != nil { + return err + } + for _, raw := range raws { + if err := decoder.Decode(raw); err != nil { + return err + } + } + + return nil +} + +// DetectContext builds a base interpolate.Context, automatically +// detecting things like user variables from the raw configuration params. +func DetectContext(raws ...interface{}) (*interpolate.Context, error) { + var s struct { + Vars map[string]string `mapstructure:"packer_user_variables"` + } + + for _, r := range raws { + if err := mapstructure.Decode(r, &s); err != nil { + return nil, err + } + } + + return &interpolate.Context{ + UserVariables: s.Vars, + }, nil +} + +func uint8ToStringHook(f reflect.Kind, t reflect.Kind, v interface{}) (interface{}, error) { + // We need to convert []uint8 to string. We have to do this + // because internally Packer uses MsgPack for RPC and the MsgPack + // codec turns strings into []uint8 + if f == reflect.Slice && t == reflect.String { + dataVal := reflect.ValueOf(v) + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + v = string(dataVal.Interface().([]uint8)) + } + } + + return v, nil +} diff --git a/helper/config/decode_test.go b/helper/config/decode_test.go new file mode 100644 index 000000000..43aa615a7 --- /dev/null +++ b/helper/config/decode_test.go @@ -0,0 +1,86 @@ +package config + +import ( + "reflect" + "testing" + + "github.com/mitchellh/packer/template/interpolate" +) + +func TestDecode(t *testing.T) { + type Target struct { + Name string + Address string + } + + cases := map[string]struct { + Input []interface{} + Output *Target + Opts *DecodeOpts + }{ + "basic": { + []interface{}{ + map[string]interface{}{ + "name": "bar", + }, + }, + &Target{ + Name: "bar", + }, + nil, + }, + + "variables": { + []interface{}{ + map[string]interface{}{ + "name": "{{user `name`}}", + }, + map[string]interface{}{ + "packer_user_variables": map[string]string{ + "name": "bar", + }, + }, + }, + &Target{ + Name: "bar", + }, + nil, + }, + + "filter": { + []interface{}{ + map[string]interface{}{ + "name": "{{user `name`}}", + "address": "{{user `name`}}", + }, + map[string]interface{}{ + "packer_user_variables": map[string]string{ + "name": "bar", + }, + }, + }, + &Target{ + Name: "bar", + Address: "{{user `name`}}", + }, + &DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Include: []string{"name"}, + }, + }, + }, + } + + for k, tc := range cases { + var result Target + err := Decode(&result, tc.Opts, tc.Input...) 
+ if err != nil { + t.Fatalf("err: %s\n\n%s", k, err) + } + + if !reflect.DeepEqual(&result, tc.Output) { + t.Fatalf("bad:\n\n%#v\n\n%#v", &result, tc.Output) + } + } +} From becd6dacd71a64df6d60b34cebdba44360272271 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 11:10:09 -0700 Subject: [PATCH 123/956] template/interpolate: support custom functions --- template/interpolate/funcs.go | 5 +++++ template/interpolate/i.go | 3 +++ 2 files changed, 8 insertions(+) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index 1ddbbe167..ca51fea81 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -45,6 +45,11 @@ func Funcs(ctx *Context) template.FuncMap { for k, v := range FuncGens { result[k] = v(ctx) } + if ctx != nil { + for k, v := range ctx.Funcs { + result[k] = v + } + } return template.FuncMap(result) } diff --git a/template/interpolate/i.go b/template/interpolate/i.go index d52653fcf..fa3a31395 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -11,6 +11,9 @@ type Context struct { // Data is the data for the template that is available Data interface{} + // Funcs are extra functions available in the template + Funcs map[string]interface{} + // UserVariables is the mapping of user variables that the // "user" function reads from. UserVariables map[string]string From bdb9bd7dc56afde764a4133769442d107e369d43 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 11:34:47 -0700 Subject: [PATCH 124/956] helper/config: error if unused keys --- helper/config/decode.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/helper/config/decode.go b/helper/config/decode.go index b9dafc915..af4ca1aa2 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -1,8 +1,12 @@ package config import ( + "fmt" "reflect" + "sort" + "strings" + "github.com/hashicorp/go-multierror" "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/template/interpolate" ) @@ -70,6 +74,21 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { } } + // If we have unused keys, it is an error + if len(md.Unused) > 0 { + var err error + sort.Strings(md.Unused) + for _, unused := range md.Unused { + if unused != "type" && !strings.HasPrefix(unused, "packer_") { + err = multierror.Append(err, fmt.Errorf( + "unknown configuration key: %q", unused)) + } + } + if err != nil { + return err + } + } + return nil } From 50d7c598e99a6ee921087d3006375cc0c6099156 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 11:35:56 -0700 Subject: [PATCH 125/956] amazon/ebs: use new interpolation stuff --- builder/amazon/common/access_config.go | 32 ++------- builder/amazon/common/ami_config.go | 68 +------------------ builder/amazon/common/block_device.go | 47 +------------ builder/amazon/common/run_config.go | 79 ++-------------------- builder/amazon/ebs/builder.go | 30 ++++---- builder/amazon/ebs/step_create_ami.go | 2 +- builder/amazon/ebs/step_modify_instance.go | 2 +- 7 files changed, 31 insertions(+), 229 deletions(-) diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index a9a60d7cf..559726c9f 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -2,10 +2,11 @@ package common import ( "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/packer/packer" "strings" "unicode" + + "github.com/mitchellh/goamz/aws" + 
"github.com/mitchellh/packer/template/interpolate" ) // AccessConfig is for common configuration related to AWS access @@ -49,31 +50,8 @@ func (c *AccessConfig) Region() (aws.Region, error) { return aws.Regions[region], nil } -func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - templates := map[string]*string{ - "access_key": &c.AccessKey, - "secret_key": &c.SecretKey, - "region": &c.RawRegion, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - +func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { + var errs []error if c.RawRegion != "" { if _, ok := aws.Regions[c.RawRegion]; !ok { errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion)) diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go index 91c2d12d0..aa515c3db 100644 --- a/builder/amazon/common/ami_config.go +++ b/builder/amazon/common/ami_config.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // AMIConfig is for common configuration related to creating AMIs. @@ -20,49 +20,8 @@ type AMIConfig struct { AMIEnhancedNetworking bool `mapstructure:"enhanced_networking"` } -func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - templates := map[string]*string{ - "ami_name": &c.AMIName, - "ami_description": &c.AMIDescription, - "ami_virtualization_type": &c.AMIVirtType, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "ami_users": c.AMIUsers, - "ami_groups": c.AMIGroups, - "ami_product_codes": c.AMIProductCodes, - "ami_regions": c.AMIRegions, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = t.Process(elem, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - +func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error { + var errs []error if c.AMIName == "" { errs = append(errs, fmt.Errorf("ami_name must be specified")) } @@ -92,27 +51,6 @@ func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error { c.AMIRegions = regions } - newTags := make(map[string]string) - for k, v := range c.AMITags { - k, err := t.Process(k, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing tag key %s: %s", k, err)) - continue - } - - v, err := t.Process(v, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing tag value '%s': %s", v, err)) - continue - } - - newTags[k] = v - } - - c.AMITags = newTags - if len(errs) > 0 { return errs } diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 9557cc579..a8402f2d7 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -1,10 +1,8 @@ package common import ( - "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/packer/packer" + 
"github.com/mitchellh/packer/template/interpolate" ) // BlockDevice @@ -44,48 +42,7 @@ func buildBlockDevices(b []BlockDevice) []ec2.BlockDeviceMapping { return blockDevices } -func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - lists := map[string][]BlockDevice{ - "ami_block_device_mappings": b.AMIMappings, - "launch_block_device_mappings": b.LaunchMappings, - } - - var errs []error - for outer, bds := range lists { - for i := 0; i < len(bds); i++ { - templates := map[string]*string{ - "device_name": &bds[i].DeviceName, - "snapshot_id": &bds[i].SnapshotId, - "virtual_name": &bds[i].VirtualName, - "volume_type": &bds[i].VolumeType, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf( - "Error processing %s[%d].%s: %s", - outer, i, n, err)) - } - } - } - } - - if len(errs) > 0 { - return errs - } - +func (b *BlockDevices) Prepare(ctx *interpolate.Context) []error { return nil } diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index a71387623..0afdeb07c 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -7,7 +7,7 @@ import ( "time" "github.com/mitchellh/packer/common/uuid" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // RunConfig contains configuration for running an instance from a source @@ -38,43 +38,7 @@ type RunConfig struct { sshTimeout time.Duration } -func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - templates := map[string]*string{ - "iam_instance_profile": &c.IamInstanceProfile, - "instance_type": &c.InstanceType, - "spot_price": &c.SpotPrice, - "spot_price_auto_product": &c.SpotPriceAutoProduct, - "ssh_timeout": &c.RawSSHTimeout, - "ssh_username": &c.SSHUsername, - "ssh_private_key_file": &c.SSHPrivateKeyFile, - "source_ami": &c.SourceAmi, - "subnet_id": &c.SubnetId, - "temporary_key_pair_name": &c.TemporaryKeyPairName, - "vpc_id": &c.VpcId, - "availability_zone": &c.AvailabilityZone, - "user_data": &c.UserData, - "user_data_file": &c.UserDataFile, - "security_group_id": &c.SecurityGroupId, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { // Defaults if c.SSHPort == 0 { c.SSHPort = 22 @@ -90,7 +54,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { } // Validation - var err error + var errs []error if c.SourceAmi == "" { errs = append(errs, errors.New("A source_ami must be specified")) } @@ -127,42 +91,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { } } - sliceTemplates := map[string][]string{ - "security_group_ids": c.SecurityGroupIds, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = t.Process(elem, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - - newTags := make(map[string]string) - for k, v := range c.RunTags { - k, err := t.Process(k, nil) - if err != nil { - errs = 
append(errs, - fmt.Errorf("Error processing tag key %s: %s", k, err)) - continue - } - - v, err := t.Process(v, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing tag value '%s': %s", v, err)) - continue - } - - newTags[k] = v - } - - c.RunTags = newTags - + var err error c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) if err != nil { errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 889cc7b60..cbf1709e0 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -13,13 +13,15 @@ import ( "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // The unique ID for this builder const BuilderId = "mitchellh.amazonebs" -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` awscommon.AccessConfig `mapstructure:",squash"` awscommon.AMIConfig `mapstructure:",squash"` @@ -27,32 +29,30 @@ type config struct { awscommon.RunConfig `mapstructure:",squash"` tpl *packer.ConfigTemplate + ctx *interpolate.Context } type Builder struct { - config config + config Config runner multistep.Runner } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: b.config.ctx, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - b.config.tpl.Funcs(awscommon.TemplateFuncs) - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.ctx)...) 
if errs != nil && len(errs.Errors) > 0 { return nil, errs diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go index f380ea0b1..eb8ddea7c 100644 --- a/builder/amazon/ebs/step_create_ami.go +++ b/builder/amazon/ebs/step_create_ami.go @@ -13,7 +13,7 @@ type stepCreateAMI struct { } func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(config) + config := state.Get("config").(Config) ec2conn := state.Get("ec2").(*ec2.EC2) instance := state.Get("instance").(*ec2.Instance) ui := state.Get("ui").(packer.Ui) diff --git a/builder/amazon/ebs/step_modify_instance.go b/builder/amazon/ebs/step_modify_instance.go index 21c5e7de9..8b3f19b4d 100644 --- a/builder/amazon/ebs/step_modify_instance.go +++ b/builder/amazon/ebs/step_modify_instance.go @@ -11,7 +11,7 @@ import ( type stepModifyInstance struct{} func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(config) + config := state.Get("config").(Config) ec2conn := state.Get("ec2").(*ec2.EC2) instance := state.Get("instance").(*ec2.Instance) ui := state.Get("ui").(packer.Ui) From 034e4e676ceb46bb70e98dc61ef227f3b2c61d1f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 11:47:45 -0700 Subject: [PATCH 126/956] amazon/*: use new interpolation functions --- builder/amazon/chroot/builder.go | 72 ++++++------------- builder/amazon/chroot/step_mount_device.go | 14 ++-- builder/amazon/ebs/builder.go | 1 - builder/amazon/instance/builder.go | 65 +++++------------ builder/amazon/instance/step_bundle_volume.go | 6 +- builder/amazon/instance/step_upload_bundle.go | 6 +- template/interpolate/render.go | 21 +++++- 7 files changed, 75 insertions(+), 110 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 832a26022..addf5c464 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -6,7 +6,6 @@ package chroot import ( "errors" - "fmt" "log" "runtime" @@ -14,7 +13,9 @@ import ( "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // The unique ID for this builder @@ -34,7 +35,7 @@ type Config struct { MountPath string `mapstructure:"mount_path"` SourceAmi string `mapstructure:"source_ami"` - tpl *packer.ConfigTemplate + ctx *interpolate.Context } type wrappedCommandTemplate struct { @@ -47,18 +48,21 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: b.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "command_wrapper", + "mount_path", + }, + }, + }, raws...) 
if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - b.config.tpl.Funcs(awscommon.TemplateFuncs) - // Defaults if b.config.ChrootMounts == nil { b.config.ChrootMounts = make([][]string, 0) @@ -91,55 +95,22 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } // Accumulate any errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) - for i, mounts := range b.config.ChrootMounts { + for _, mounts := range b.config.ChrootMounts { if len(mounts) != 3 { errs = packer.MultiErrorAppend( errs, errors.New("Each chroot_mounts entry should be three elements.")) break } - - for j, entry := range mounts { - b.config.ChrootMounts[i][j], err = b.config.tpl.Process(entry, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing chroot_mounts[%d][%d]: %s", - i, j, err)) - } - } - } - - for i, file := range b.config.CopyFiles { - var err error - b.config.CopyFiles[i], err = b.config.tpl.Process(file, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing copy_files[%d]: %s", - i, err)) - } } if b.config.SourceAmi == "" { errs = packer.MultiErrorAppend(errs, errors.New("source_ami is required.")) } - templates := map[string]*string{ - "device_path": &b.config.DevicePath, - "source_ami": &b.config.SourceAmi, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - if errs != nil && len(errs.Errors) > 0 { return nil, errs } @@ -166,10 +137,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ec2conn := ec2.New(auth, region) wrappedCommand := func(command string) (string, error) { - return b.config.tpl.Process( - b.config.CommandWrapper, &wrappedCommandTemplate{ - Command: command, - }) + ctx := *b.config.ctx + ctx.Data = &wrappedCommandTemplate{Command: command} + return interpolate.Render(b.config.CommandWrapper, &ctx) } // Setup the state bag and initial state for the steps diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index 3c3d959c1..ef531ecc3 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -3,12 +3,14 @@ package chroot import ( "bytes" "fmt" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "os" "path/filepath" + + "github.com/mitchellh/goamz/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type mountPathData struct { @@ -31,9 +33,9 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { device := state.Get("device").(string) wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) - mountPath, err := config.tpl.Process(config.MountPath, &mountPathData{ - Device: filepath.Base(device), - }) + ctx := *config.ctx + ctx.Data = &mountPathData{Device: 
filepath.Base(device)} + mountPath, err := interpolate.Render(config.MountPath, &ctx) if err != nil { err := fmt.Errorf("Error preparing mount directory: %s", err) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index cbf1709e0..a41828e30 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -28,7 +28,6 @@ type Config struct { awscommon.BlockDevices `mapstructure:",squash"` awscommon.RunConfig `mapstructure:",squash"` - tpl *packer.ConfigTemplate ctx *interpolate.Context } diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 63b8442ac..2845aa25d 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -13,7 +13,9 @@ import ( "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // The unique ID for this builder @@ -38,7 +40,7 @@ type Config struct { X509KeyPath string `mapstructure:"x509_key_path"` X509UploadPath string `mapstructure:"x509_upload_path"` - tpl *packer.ConfigTemplate + ctx *interpolate.Context } type Builder struct { @@ -47,18 +49,21 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: b.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "bundle_upload_command", + "bundle_vol_command", + }, + }, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - b.config.tpl.Funcs(awscommon.TemplateFuncs) - if b.config.BundleDestination == "" { b.config.BundleDestination = "/tmp" } @@ -97,43 +102,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } // Accumulate any errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) 
- - validates := map[string]*string{ - "bundle_upload_command": &b.config.BundleUploadCommand, - "bundle_vol_command": &b.config.BundleVolCommand, - } - - for n, ptr := range validates { - if err := b.config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - - templates := map[string]*string{ - "account_id": &b.config.AccountId, - "ami_name": &b.config.AMIName, - "bundle_destination": &b.config.BundleDestination, - "bundle_prefix": &b.config.BundlePrefix, - "s3_bucket": &b.config.S3Bucket, - "x509_cert_path": &b.config.X509CertPath, - "x509_key_path": &b.config.X509KeyPath, - "x509_upload_path": &b.config.X509UploadPath, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.ctx)...) if b.config.AccountId == "" { errs = packer.MultiErrorAppend(errs, errors.New("account_id is required")) diff --git a/builder/amazon/instance/step_bundle_volume.go b/builder/amazon/instance/step_bundle_volume.go index 736e1adb2..8fd320e4a 100644 --- a/builder/amazon/instance/step_bundle_volume.go +++ b/builder/amazon/instance/step_bundle_volume.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/goamz/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type bundleCmdData struct { @@ -32,7 +33,7 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { // Bundle the volume var err error - config.BundleVolCommand, err = config.tpl.Process(config.BundleVolCommand, bundleCmdData{ + config.ctx.Data = bundleCmdData{ AccountId: config.AccountId, Architecture: instance.Architecture, CertPath: x509RemoteCertPath, @@ -40,7 +41,8 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { KeyPath: x509RemoteKeyPath, Prefix: config.BundlePrefix, PrivatePath: config.X509UploadPath, - }) + } + config.BundleVolCommand, err = interpolate.Render(config.BundleVolCommand, config.ctx) if err != nil { err := fmt.Errorf("Error processing bundle volume command: %s", err) state.Put("error", err) diff --git a/builder/amazon/instance/step_upload_bundle.go b/builder/amazon/instance/step_upload_bundle.go index dbf6a0c29..c30ec9415 100644 --- a/builder/amazon/instance/step_upload_bundle.go +++ b/builder/amazon/instance/step_upload_bundle.go @@ -5,6 +5,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type uploadCmdData struct { @@ -35,14 +36,15 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - config.BundleUploadCommand, err = config.tpl.Process(config.BundleUploadCommand, uploadCmdData{ + config.ctx.Data = uploadCmdData{ AccessKey: config.AccessKey, BucketName: config.S3Bucket, BundleDirectory: config.BundleDestination, ManifestPath: manifestPath, Region: region.Name, SecretKey: config.SecretKey, - }) + } + config.BundleUploadCommand, err = 
interpolate.Render(config.BundleUploadCommand, config.ctx) if err != nil { err := fmt.Errorf("Error processing bundle upload command: %s", err) state.Put("error", err) diff --git a/template/interpolate/render.go b/template/interpolate/render.go index a16ae94e9..a4c33222d 100644 --- a/template/interpolate/render.go +++ b/template/interpolate/render.go @@ -14,8 +14,10 @@ import ( // doesn't within an interface. type RenderFilter struct { Include []string + Exclude []string once sync.Once + excludeSet map[string]struct{} includeSet map[string]struct{} } @@ -74,9 +76,19 @@ func (f *RenderFilter) include(k string) bool { return true } + k = strings.ToLower(k) + f.once.Do(f.init) - _, ok := f.includeSet[strings.ToLower(k)] - return ok + if len(f.includeSet) > 0 { + _, ok := f.includeSet[k] + return ok + } + if len(f.excludeSet) > 0 { + _, ok := f.excludeSet[k] + return !ok + } + + return true } func (f *RenderFilter) init() { @@ -84,6 +96,11 @@ func (f *RenderFilter) init() { for _, v := range f.Include { f.includeSet[strings.ToLower(v)] = struct{}{} } + + f.excludeSet = make(map[string]struct{}) + for _, v := range f.Exclude { + f.excludeSet[strings.ToLower(v)] = struct{}{} + } } // renderWalker implements interfaces for the reflectwalk package From 7d0f94834ed1b41fe18ebd8607fefc8ac3f6f59f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 12:50:43 -0700 Subject: [PATCH 127/956] builder/digitalocean: interpolation change --- builder/digitalocean/builder.go | 45 +++++---------------- builder/digitalocean/ssh.go | 4 +- builder/digitalocean/step_create_droplet.go | 4 +- builder/digitalocean/step_create_ssh_key.go | 2 +- builder/digitalocean/step_droplet_info.go | 2 +- builder/digitalocean/step_power_off.go | 2 +- builder/digitalocean/step_snapshot.go | 2 +- 7 files changed, 17 insertions(+), 44 deletions(-) diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index cf76f6970..57e6018a1 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -13,7 +13,9 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key] @@ -34,7 +36,7 @@ const BuilderId = "pearkes.digitalocean" // Configuration tells the builder the credentials // to use while communicating with DO and describes the image // you are creating -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` ClientID string `mapstructure:"client_id"` @@ -63,29 +65,23 @@ type config struct { sshTimeout time.Duration stateTimeout time.Duration + ctx *interpolate.Context tpl *packer.ConfigTemplate } type Builder struct { - config config + config Config runner multistep.Runner } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + }, raws...) 
if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - // Optional configuration with defaults if b.config.APIKey == "" { // Default to environment variable for api_key, if it exists @@ -163,30 +159,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.RawStateTimeout = "6m" } - templates := map[string]*string{ - "region": &b.config.Region, - "size": &b.config.Size, - "image": &b.config.Image, - "client_id": &b.config.ClientID, - "api_key": &b.config.APIKey, - "api_url": &b.config.APIURL, - "api_token": &b.config.APIToken, - "snapshot_name": &b.config.SnapshotName, - "droplet_name": &b.config.DropletName, - "ssh_username": &b.config.SSHUsername, - "ssh_timeout": &b.config.RawSSHTimeout, - "state_timeout": &b.config.RawStateTimeout, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs *packer.MultiError if b.config.APIToken == "" { // Required configurations that will display errors if not set if b.config.ClientID == "" { diff --git a/builder/digitalocean/ssh.go b/builder/digitalocean/ssh.go index 2e9afcf7a..c53262ccc 100644 --- a/builder/digitalocean/ssh.go +++ b/builder/digitalocean/ssh.go @@ -7,13 +7,13 @@ import ( ) func sshAddress(state multistep.StateBag) (string, error) { - config := state.Get("config").(config) + config := state.Get("config").(Config) ipAddress := state.Get("droplet_ip").(string) return fmt.Sprintf("%s:%d", ipAddress, config.SSHPort), nil } func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { - config := state.Get("config").(config) + config := state.Get("config").(Config) privateKey := state.Get("privateKey").(string) signer, err := ssh.ParsePrivateKey([]byte(privateKey)) diff --git a/builder/digitalocean/step_create_droplet.go b/builder/digitalocean/step_create_droplet.go index 85164abaf..afb3e5814 100644 --- a/builder/digitalocean/step_create_droplet.go +++ b/builder/digitalocean/step_create_droplet.go @@ -14,7 +14,7 @@ type stepCreateDroplet struct { func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(DigitalOceanClient) ui := state.Get("ui").(packer.Ui) - c := state.Get("config").(config) + c := state.Get("config").(Config) sshKeyId := state.Get("ssh_key_id").(uint) ui.Say("Creating droplet...") @@ -46,7 +46,7 @@ func (s *stepCreateDroplet) Cleanup(state multistep.StateBag) { client := state.Get("client").(DigitalOceanClient) ui := state.Get("ui").(packer.Ui) - c := state.Get("config").(config) + c := state.Get("config").(Config) // Destroy the droplet we just created ui.Say("Destroying droplet...") diff --git a/builder/digitalocean/step_create_ssh_key.go b/builder/digitalocean/step_create_ssh_key.go index 78bb474c1..db1ad9c16 100644 --- a/builder/digitalocean/step_create_ssh_key.go +++ b/builder/digitalocean/step_create_ssh_key.go @@ -73,7 +73,7 @@ func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) { client := state.Get("client").(DigitalOceanClient) ui := state.Get("ui").(packer.Ui) - c := state.Get("config").(config) + c := state.Get("config").(Config) ui.Say("Deleting temporary ssh key...") err := client.DestroyKey(s.keyId) diff --git a/builder/digitalocean/step_droplet_info.go 
b/builder/digitalocean/step_droplet_info.go index ea08599ce..8e9b69927 100644 --- a/builder/digitalocean/step_droplet_info.go +++ b/builder/digitalocean/step_droplet_info.go @@ -12,7 +12,7 @@ type stepDropletInfo struct{} func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(DigitalOceanClient) ui := state.Get("ui").(packer.Ui) - c := state.Get("config").(config) + c := state.Get("config").(Config) dropletId := state.Get("droplet_id").(uint) ui.Say("Waiting for droplet to become active...") diff --git a/builder/digitalocean/step_power_off.go b/builder/digitalocean/step_power_off.go index efead2d8a..d6ef49a22 100644 --- a/builder/digitalocean/step_power_off.go +++ b/builder/digitalocean/step_power_off.go @@ -12,7 +12,7 @@ type stepPowerOff struct{} func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(DigitalOceanClient) - c := state.Get("config").(config) + c := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) dropletId := state.Get("droplet_id").(uint) diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go index 4fdf20bc1..1903c1a34 100644 --- a/builder/digitalocean/step_snapshot.go +++ b/builder/digitalocean/step_snapshot.go @@ -14,7 +14,7 @@ type stepSnapshot struct{} func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(DigitalOceanClient) ui := state.Get("ui").(packer.Ui) - c := state.Get("config").(config) + c := state.Get("config").(Config) dropletId := state.Get("droplet_id").(uint) ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName)) From faf327eed09586ed18d79e15d6df6ae578437950 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 12:55:36 -0700 Subject: [PATCH 128/956] builder/docker: convert to new interpolation --- builder/docker/builder.go | 2 +- builder/docker/config.go | 57 ++++++++++----------------------- builder/docker/driver_docker.go | 7 ++-- 3 files changed, 23 insertions(+), 43 deletions(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 6ab6fb593..a94177070 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -26,7 +26,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - driver := &DockerDriver{Tpl: b.config.tpl, Ui: ui} + driver := &DockerDriver{Ctx: b.config.ctx, Ui: ui} if err := driver.Verify(); err != nil { return nil, err } diff --git a/builder/docker/config.go b/builder/docker/config.go index dda726225..dd525497e 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -2,8 +2,12 @@ package docker import ( "fmt" + + "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -22,23 +26,26 @@ type Config struct { LoginPassword string `mapstructure:"login_password"` LoginServer string `mapstructure:"login_server"` - tpl *packer.ConfigTemplate + ctx *interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - md, err := common.DecodeConfig(c, raws...) 
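// The config-to-Config rename in the digitalocean step files above is
// load-bearing: the value travels between steps through the multistep
// state bag as an interface{}, so every step's type assertion must name
// the same, now-exported, type. A sketch of the round trip, assuming
// only the state-bag calls visible in these hunks:
state := new(multistep.BasicStateBag)
state.Put("config", b.config)
// Each step then recovers the concrete type by assertion, which is why
// renaming the struct touches every step file at once.
c := state.Get("config").(Config)
_ = c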
+ + var md mapstructure.Metadata + err := config.Decode(&c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - - c.tpl.UserVars = c.PackerUserVars - // Defaults if len(c.RunCommand) == 0 { c.RunCommand = []string{ @@ -61,37 +68,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.Pull = true } - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "export_path": &c.ExportPath, - "image": &c.Image, - "login_email": &c.LoginEmail, - "login_username": &c.LoginUsername, - "login_password": &c.LoginPassword, - "login_server": &c.LoginServer, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for k, v := range c.Volumes { - var err error - v, err = c.tpl.Process(v, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing volumes[%s]: %s", k, err)) - } - - c.Volumes[k] = v - } - + var errs *packer.MultiError if c.Image == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("image must be specified")) diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index f724e37ec..038aa046e 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -11,11 +11,12 @@ import ( "sync" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type DockerDriver struct { Ui packer.Ui - Tpl *packer.ConfigTemplate + Ctx *interpolate.Context l sync.Mutex } @@ -185,6 +186,8 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { // Build up the template data var tplData startContainerTemplate tplData.Image = config.Image + ctx := *d.Ctx + ctx.Data = &tplData // Args that we're going to pass to Docker args := []string{"run"} @@ -192,7 +195,7 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest)) } for _, v := range config.RunCommand { - v, err := d.Tpl.Process(v, &tplData) + v, err := interpolate.Render(v, &ctx) if err != nil { return "", err } From 931f3eb7bc82149a4e5363da0d7ac65447998e10 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 12:55:41 -0700 Subject: [PATCH 129/956] helper/config: support outputting metadata --- helper/config/decode.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/helper/config/decode.go b/helper/config/decode.go index af4ca1aa2..73e470d27 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -13,6 +13,9 @@ import ( // DecodeOpts are the options for decoding configuration. type DecodeOpts struct { + // Metadata, if non-nil, will be set to the metadata post-decode + Metadata *mapstructure.Metadata + // Interpolate, if true, will automatically interpolate the // configuration with the given InterpolateContext. 
User variables // will be automatically detected and added in-place to the given @@ -74,6 +77,11 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { } } + // Set the metadata if it is set + if config.Metadata != nil { + *config.Metadata = md + } + // If we have unused keys, it is an error if len(md.Unused) > 0 { var err error From 5aa30caa64e106703a7808c3ce569a1449f3e60e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 12:57:48 -0700 Subject: [PATCH 130/956] builder/googlecompute: new interpolation --- builder/googlecompute/config.go | 50 ++++++++------------------------- 1 file changed, 12 insertions(+), 38 deletions(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 96557da10..743e4745c 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -7,7 +7,9 @@ import ( "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // Config is the configuration structure for the GCE builder. It stores @@ -40,25 +42,23 @@ type Config struct { privateKeyBytes []byte sshTimeout time.Duration stateTimeout time.Duration - tpl *packer.ConfigTemplate + ctx *interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - md, err := common.DecodeConfig(c, raws...) + err := config.Decode(&c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - c.tpl.UserVars = c.PackerUserVars - - // Prepare the errors - errs := common.CheckUnusedConfig(md) - // Set defaults. if c.Network == "" { c.Network = "default" @@ -104,33 +104,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.SSHPort = 22 } - // Process Templates - templates := map[string]*string{ - "account_file": &c.AccountFile, - - "disk_name": &c.DiskName, - "image_name": &c.ImageName, - "image_description": &c.ImageDescription, - "instance_name": &c.InstanceName, - "machine_type": &c.MachineType, - "network": &c.Network, - "project_id": &c.ProjectId, - "source_image": &c.SourceImage, - "source_image_project_id": &c.SourceImageProjectId, - "ssh_username": &c.SSHUsername, - "ssh_timeout": &c.RawSSHTimeout, - "state_timeout": &c.RawStateTimeout, - "zone": &c.Zone, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } + var errs *packer.MultiError // Process required parameters. 
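// The Metadata field and the copy-out block added to Decode above let a
// caller see what mapstructure reported; docker's NewConfig threads it
// through solely to inspect unused keys. A sketch of that pattern,
// assuming only the fields shown in the decode.go hunk:
var md mapstructure.Metadata
err := config.Decode(&c, &config.DecodeOpts{
	Metadata:    &md,
	Interpolate: true,
}, raws...)
if err != nil {
	// Decode has already turned unused keys into an error; md.Unused
	// still names the offending keys for a friendlier message.
	log.Printf("unknown configuration keys: %v", md.Unused)
	return nil, nil, err
}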
if c.ProjectId == "" { From 31bdb4853c6d5aa69462c6e03f07da0635791ac5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 12:59:14 -0700 Subject: [PATCH 131/956] builder/null: interpolations --- builder/null/config.go | 40 ++++++++++++---------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/builder/null/config.go b/builder/null/config.go index 4823df8ae..7665aec51 100644 --- a/builder/null/config.go +++ b/builder/null/config.go @@ -3,7 +3,9 @@ package null import ( "fmt" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -14,46 +16,28 @@ type Config struct { SSHUsername string `mapstructure:"ssh_username"` SSHPassword string `mapstructure:"ssh_password"` SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"` - - tpl *packer.ConfigTemplate } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - md, err := common.DecodeConfig(c, raws...) + + err := config.Decode(&c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - - c.tpl.UserVars = c.PackerUserVars - if c.Port == 0 { c.Port = 22 } - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "host": &c.Host, - "ssh_username": &c.SSHUsername, - "ssh_password": &c.SSHPassword, - "ssh_private_key_file": &c.SSHPrivateKeyFile, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs *packer.MultiError if c.Host == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("host must be specified")) From 3b29fa5e40ab5dd2cf90ae373cddbe2238b790f9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 13:02:57 -0700 Subject: [PATCH 132/956] builder/openstack: convert interpolation --- builder/openstack/access_config.go | 34 +++----------------------- builder/openstack/builder.go | 30 +++++++++++------------ builder/openstack/image_config.go | 26 +++----------------- builder/openstack/run_config.go | 32 +++--------------------- builder/openstack/step_create_image.go | 2 +- 5 files changed, 24 insertions(+), 100 deletions(-) diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go index 502df161f..cb1c9d7bd 100644 --- a/builder/openstack/access_config.go +++ b/builder/openstack/access_config.go @@ -3,14 +3,14 @@ package openstack import ( "crypto/tls" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "net/http" "net/url" "os" "strings" "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/template/interpolate" ) // AccessConfig is for common configuration related to openstack access @@ -86,36 +86,8 @@ func (c *AccessConfig) Region() string { return common.ChooseString(c.RawRegion, os.Getenv("SDK_REGION"), os.Getenv("OS_REGION_NAME")) } -func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - templates := map[string]*string{ - "username": 
&c.Username, - "password": &c.Password, - "api_key": &c.ApiKey, - "provider": &c.Provider, - "project": &c.Project, - "tenant_id": &c.TenantId, - "region": &c.RawRegion, - "proxy_url": &c.ProxyUrl, - } - +func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - if strings.HasPrefix(c.Provider, "rackspace") { if c.Region() == "" { errs = append(errs, fmt.Errorf("region must be specified when using rackspace")) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index 3b2834063..e6e5c6675 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -7,46 +7,44 @@ import ( "fmt" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "log" "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // The unique ID for this builder const BuilderId = "mitchellh.openstack" -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` AccessConfig `mapstructure:",squash"` ImageConfig `mapstructure:",squash"` RunConfig `mapstructure:",squash"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Builder struct { - config config + config Config runner multistep.Runner } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs @@ -96,7 +94,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Networks: b.config.Networks, }, &StepWaitForRackConnect{ - Wait: b.config.RackconnectWait, + Wait: b.config.RackconnectWait, }, &StepAllocateIp{ FloatingIpPool: b.config.FloatingIpPool, diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go index 652029b26..124449eab 100644 --- a/builder/openstack/image_config.go +++ b/builder/openstack/image_config.go @@ -2,7 +2,8 @@ package openstack import ( "fmt" - "github.com/mitchellh/packer/packer" + + "github.com/mitchellh/packer/template/interpolate" ) // ImageConfig is for common configuration related to creating Images. 
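// Each sub-config in these commits swaps Prepare(t *packer.ConfigTemplate)
// for Prepare(ctx *interpolate.Context) and deletes its field-by-field
// template loop, leaving only defaults and validation. The resulting
// shape, sketched with a hypothetical one-field config (the field name
// is invented; the structure mirrors the hunks):
type NameConfig struct {
	Name string `mapstructure:"name"`
}

// By the time Prepare runs, config.Decode has already interpolated the
// fields, so only validation remains.
func (c *NameConfig) Prepare(ctx *interpolate.Context) []error {
	errs := make([]error, 0)
	if c.Name == "" {
		errs = append(errs, fmt.Errorf("a name must be specified"))
	}
	return errs
}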
@@ -10,29 +11,8 @@ type ImageConfig struct { ImageName string `mapstructure:"image_name"` } -func (c *ImageConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - - templates := map[string]*string{ - "image_name": &c.ImageName, - } - +func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error { errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - if c.ImageName == "" { errs = append(errs, fmt.Errorf("An image_name must be specified")) } diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index c2b59a118..e5d73c9c1 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -3,8 +3,9 @@ package openstack import ( "errors" "fmt" - "github.com/mitchellh/packer/packer" "time" + + "github.com/mitchellh/packer/template/interpolate" ) // RunConfig contains configuration for running an instance from a source @@ -28,15 +29,7 @@ type RunConfig struct { sshTimeout time.Duration } -func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { - if t == nil { - var err error - t, err = packer.NewConfigTemplate() - if err != nil { - return []error{err} - } - } - +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { // Defaults if c.SSHUsername == "" { c.SSHUsername = "root" @@ -69,25 +62,6 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { errs = append(errs, errors.New("An ssh_username must be specified")) } - templates := map[string]*string{ - "flavor": &c.Flavor, - "ssh_timeout": &c.RawSSHTimeout, - "ssh_username": &c.SSHUsername, - "ssh_interface": &c.SSHInterface, - "source_image": &c.SourceImage, - "openstack_provider": &c.OpenstackProvider, - "floating_ip_pool": &c.FloatingIpPool, - "floating_ip": &c.FloatingIp, - } - - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) if err != nil { errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) diff --git a/builder/openstack/step_create_image.go b/builder/openstack/step_create_image.go index a95eaff34..52a2ec4d1 100644 --- a/builder/openstack/step_create_image.go +++ b/builder/openstack/step_create_image.go @@ -14,7 +14,7 @@ type stepCreateImage struct{} func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { csp := state.Get("csp").(gophercloud.CloudServersProvider) - config := state.Get("config").(config) + config := state.Get("config").(Config) server := state.Get("server").(*gophercloud.Server) ui := state.Get("ui").(packer.Ui) From 4cae87645f66f90d4d4a6522d41a04e4d0c2e973 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 13:03:47 -0700 Subject: [PATCH 133/956] builder/docker: fix issue where docker might get nil interp context --- builder/docker/builder.go | 2 +- builder/docker/config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index a94177070..2dddbf94e 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -26,7 +26,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache 
packer.Cache) (packer.Artifact, error) {
-	driver := &DockerDriver{Ctx: b.config.ctx, Ui: ui}
+	driver := &DockerDriver{Ctx: &b.config.ctx, Ui: ui}
 	if err := driver.Verify(); err != nil {
 		return nil, err
 	}
diff --git a/builder/docker/config.go b/builder/docker/config.go
index dd525497e..e261068df 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -26,7 +26,7 @@ type Config struct {
 	LoginPassword string `mapstructure:"login_password"`
 	LoginServer   string `mapstructure:"login_server"`
 
-	ctx *interpolate.Context
+	ctx interpolate.Context
 }
 
 func NewConfig(raws ...interface{}) (*Config, []string, error) {
From 3e76547bff6edd6a84632560401461a77c70cf06 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Wed, 27 May 2015 13:39:43 -0700
Subject: [PATCH 134/956] builder/qemu: convert interpolation

---
 builder/qemu/builder.go                 | 95 +++++--------------
 builder/qemu/ssh.go                     |  2 +-
 builder/qemu/step_boot_wait.go          |  2 +-
 builder/qemu/step_configure_vnc.go      |  2 +-
 builder/qemu/step_copy_disk.go          |  2 +-
 builder/qemu/step_create_disk.go        |  2 +-
 builder/qemu/step_forward_ssh.go        |  2 +-
 builder/qemu/step_http_server.go        |  2 +-
 builder/qemu/step_prepare_output_dir.go |  4 +-
 builder/qemu/step_resize_disk.go        |  2 +-
 builder/qemu/step_run.go                | 12 ++--
 builder/qemu/step_shutdown.go           |  2 +-
 builder/qemu/step_type_boot_command.go  | 15 ++--
 13 files changed, 43 insertions(+), 101 deletions(-)

diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index e3bc6e4a1..9b4abb437 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -13,7 +13,9 @@ import (
 	"github.com/mitchellh/multistep"
 	"github.com/mitchellh/packer/common"
 	commonssh "github.com/mitchellh/packer/common/ssh"
+	"github.com/mitchellh/packer/helper/config"
 	"github.com/mitchellh/packer/packer"
+	"github.com/mitchellh/packer/template/interpolate"
 )
 
 const BuilderId = "transcend.qemu"
@@ -65,11 +67,11 @@ var diskCache = map[string]bool{
 }
 
 type Builder struct {
-	config config
+	config Config
 	runner multistep.Runner
 }
 
-type config struct {
+type Config struct {
 	common.PackerConfig `mapstructure:",squash"`
 
 	Accelerator string `mapstructure:"accelerator"`
@@ -114,25 +116,26 @@ type config struct {
 	bootWait        time.Duration ``
 	shutdownTimeout time.Duration ``
 	sshWaitTimeout  time.Duration ``
-	tpl             *packer.ConfigTemplate
+	ctx             interpolate.Context
 }
 
 func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
-	md, err := common.DecodeConfig(&b.config, raws...)
+	err := config.Decode(&b.config, &config.DecodeOpts{
+		Interpolate: true,
+		InterpolateFilter: &interpolate.RenderFilter{
+			Exclude: []string{
+				"boot_command",
+				"qemuargs",
+			},
+		},
+	}, raws...)
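// boot_command and qemuargs are excluded above because their template
// variables (HTTP server address, VM name, and so on) only exist once
// the build is under way; the step changes later in this patch render
// them at run time. The two-phase core, using only calls that appear in
// the step_run.go and step_type_boot_command.go hunks below:
ctx := config.ctx // copy, so per-step data cannot leak between steps
ctx.Data = &bootCommandTemplateData{"10.0.2.2", httpPort, config.VMName}
for _, command := range config.BootCommand {
	command, err := interpolate.Render(command, &ctx)
	if err != nil {
		return multistep.ActionHalt
	}
	_ = command // the real step types this over VNC
}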
if err != nil { return nil, err } + + var errs *packer.MultiError warnings := make([]string, 0) - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } @@ -189,15 +192,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.VNCPortMax = 6000 } - for i, args := range b.config.QemuArgs { - for j, arg := range args { - if err := b.config.tpl.Validate(arg); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing qemu-system_x86-64[%d][%d]: %s", i, j, err)) - } - } - } - if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } @@ -218,63 +212,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.DiskInterface = "virtio" } - // Errors - templates := map[string]*string{ - "http_directory": &b.config.HTTPDir, - "iso_checksum": &b.config.ISOChecksum, - "iso_checksum_type": &b.config.ISOChecksumType, - "iso_url": &b.config.RawSingleISOUrl, - "output_directory": &b.config.OutputDir, - "shutdown_command": &b.config.ShutdownCommand, - "ssh_key_path": &b.config.SSHKeyPath, - "ssh_password": &b.config.SSHPassword, - "ssh_username": &b.config.SSHUser, - "vm_name": &b.config.VMName, - "format": &b.config.Format, - "boot_wait": &b.config.RawBootWait, - "shutdown_timeout": &b.config.RawShutdownTimeout, - "ssh_wait_timeout": &b.config.RawSSHWaitTimeout, - "accelerator": &b.config.Accelerator, - "machine_type": &b.config.MachineType, - "net_device": &b.config.NetDevice, - "disk_interface": &b.config.DiskInterface, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, url := range b.config.ISOUrls { - var err error - b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) - } - } - - for i, command := range b.config.BootCommand { - if err := b.config.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } - - for i, file := range b.config.FloppyFiles { - var err error - b.config.FloppyFiles[i], err = b.config.tpl.Process(file, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing floppy_files[%d]: %s", - i, err)) - } - } - if !(b.config.Format == "qcow2" || b.config.Format == "raw") { errs = packer.MultiErrorAppend( errs, errors.New("invalid format, only 'qcow2' or 'raw' are allowed")) diff --git a/builder/qemu/ssh.go b/builder/qemu/ssh.go index de7858166..dfd87ee20 100644 --- a/builder/qemu/ssh.go +++ b/builder/qemu/ssh.go @@ -15,7 +15,7 @@ func sshAddress(state multistep.StateBag) (string, error) { } func sshConfig(state multistep.StateBag) (*gossh.ClientConfig, error) { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) auth := []gossh.AuthMethod{ gossh.Password(config.SSHPassword), diff --git a/builder/qemu/step_boot_wait.go b/builder/qemu/step_boot_wait.go index 46a48dcbc..8557900a5 100644 --- a/builder/qemu/step_boot_wait.go +++ b/builder/qemu/step_boot_wait.go @@ -11,7 +11,7 @@ import ( type stepBootWait struct{} func (s *stepBootWait) 
Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) if int64(config.bootWait) > 0 { diff --git a/builder/qemu/step_configure_vnc.go b/builder/qemu/step_configure_vnc.go index 097271b51..be452620d 100644 --- a/builder/qemu/step_configure_vnc.go +++ b/builder/qemu/step_configure_vnc.go @@ -20,7 +20,7 @@ import ( type stepConfigureVNC struct{} func (stepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) // Find an open VNC port. Note that this can still fail later on diff --git a/builder/qemu/step_copy_disk.go b/builder/qemu/step_copy_disk.go index 54c3084ac..6afb70cb0 100644 --- a/builder/qemu/step_copy_disk.go +++ b/builder/qemu/step_copy_disk.go @@ -14,7 +14,7 @@ import ( type stepCopyDisk struct{} func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(Driver) isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) diff --git a/builder/qemu/step_create_disk.go b/builder/qemu/step_create_disk.go index 986df1d2b..a1b5623a4 100644 --- a/builder/qemu/step_create_disk.go +++ b/builder/qemu/step_create_disk.go @@ -13,7 +13,7 @@ import ( type stepCreateDisk struct{} func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) name := config.VMName + "." + strings.ToLower(config.Format) diff --git a/builder/qemu/step_forward_ssh.go b/builder/qemu/step_forward_ssh.go index 3b84d26c1..c88d5623d 100644 --- a/builder/qemu/step_forward_ssh.go +++ b/builder/qemu/step_forward_ssh.go @@ -19,7 +19,7 @@ import ( type stepForwardSSH struct{} func (s *stepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) log.Printf("Looking for available SSH port between %d and %d", config.SSHHostPortMin, config.SSHHostPortMax) diff --git a/builder/qemu/step_http_server.go b/builder/qemu/step_http_server.go index 831253853..a5d784ec2 100644 --- a/builder/qemu/step_http_server.go +++ b/builder/qemu/step_http_server.go @@ -25,7 +25,7 @@ type stepHTTPServer struct { } func (s *stepHTTPServer) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) var httpPort uint = 0 diff --git a/builder/qemu/step_prepare_output_dir.go b/builder/qemu/step_prepare_output_dir.go index 43320399a..0f3bd1278 100644 --- a/builder/qemu/step_prepare_output_dir.go +++ b/builder/qemu/step_prepare_output_dir.go @@ -11,7 +11,7 @@ import ( type stepPrepareOutputDir struct{} func (stepPrepareOutputDir) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) if _, err := os.Stat(config.OutputDir); err == nil && config.PackerForce { @@ -32,7 +32,7 @@ func (stepPrepareOutputDir) Cleanup(state multistep.StateBag) { _, halted := state.GetOk(multistep.StateHalted) if cancelled || halted { - config := state.Get("config").(*config) + 
config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) ui.Say("Deleting output directory...") diff --git a/builder/qemu/step_resize_disk.go b/builder/qemu/step_resize_disk.go index 4e8536c32..58e405747 100644 --- a/builder/qemu/step_resize_disk.go +++ b/builder/qemu/step_resize_disk.go @@ -13,7 +13,7 @@ import ( type stepResizeDisk struct{} func (s *stepResizeDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 3f900d651..aa13a1076 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // stepRun runs the virtual machine @@ -56,7 +57,7 @@ func (s *stepRun) Cleanup(state multistep.StateBag) { } func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error) { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) isoPath := state.Get("iso_path").(string) vncPort := state.Get("vnc_port").(uint) sshHostPort := state.Get("sshHostPort").(uint) @@ -109,14 +110,15 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error ui.Say("Overriding defaults Qemu arguments with QemuArgs...") httpPort := state.Get("http_port").(uint) - tplData := qemuArgsTemplateData{ + ctx := config.ctx + ctx.Data = qemuArgsTemplateData{ "10.0.2.2", httpPort, config.HTTPDir, config.OutputDir, config.VMName, } - newQemuArgs, err := processArgs(config.QemuArgs, config.tpl, &tplData) + newQemuArgs, err := processArgs(config.QemuArgs, &ctx) if err != nil { return nil, err } @@ -160,7 +162,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error return outArgs, nil } -func processArgs(args [][]string, tpl *packer.ConfigTemplate, tplData *qemuArgsTemplateData) ([][]string, error) { +func processArgs(args [][]string, ctx *interpolate.Context) ([][]string, error) { var err error if args == nil { @@ -172,7 +174,7 @@ func processArgs(args [][]string, tpl *packer.ConfigTemplate, tplData *qemuArgsT parms := make([]string, len(rowArgs)) newArgs[argsIdx] = parms for i, parm := range rowArgs { - parms[i], err = tpl.Process(parm, &tplData) + parms[i], err = interpolate.Render(parm, ctx) if err != nil { return nil, err } diff --git a/builder/qemu/step_shutdown.go b/builder/qemu/step_shutdown.go index a225496b2..127dcff12 100644 --- a/builder/qemu/step_shutdown.go +++ b/builder/qemu/step_shutdown.go @@ -24,7 +24,7 @@ type stepShutdown struct{} func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { comm := state.Get("communicator").(packer.Communicator) - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index 6416e0ea1..e42903f55 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -2,15 +2,17 @@ package qemu import ( "fmt" - "github.com/mitchellh/go-vnc" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "net" "strings" "time" "unicode" "unicode/utf8" + + 
"github.com/mitchellh/go-vnc" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const KeyLeftShift uint32 = 0xFFE1 @@ -34,7 +36,7 @@ type bootCommandTemplateData struct { type stepTypeBootCommand struct{} func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) httpPort := state.Get("http_port").(uint) ui := state.Get("ui").(packer.Ui) vncPort := state.Get("vnc_port").(uint) @@ -61,7 +63,8 @@ func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction log.Printf("Connected to VNC desktop: %s", c.DesktopName) - tplData := &bootCommandTemplateData{ + ctx := config.ctx + ctx.Data = &bootCommandTemplateData{ "10.0.2.2", httpPort, config.VMName, @@ -69,7 +72,7 @@ func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui.Say("Typing the boot command over VNC...") for _, command := range config.BootCommand { - command, err := config.tpl.Process(command, tplData) + command, err := interpolate.Render(command, &ctx) if err != nil { err := fmt.Errorf("Error preparing boot command: %s", err) state.Put("error", err) From 76c2d2cb0e4e18bf1f026fe7c32f61c5ad3788ff Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 13:49:31 -0700 Subject: [PATCH 135/956] builder/parallels/iso: new interpolation --- builder/parallels/common/floppy_config.go | 18 +--- builder/parallels/common/output_config.go | 21 +---- builder/parallels/common/prlctl_config.go | 17 +--- .../parallels/common/prlctl_version_config.go | 20 +--- builder/parallels/common/run_config.go | 22 +---- builder/parallels/common/shutdown_config.go | 20 +--- builder/parallels/common/ssh_config.go | 21 +---- builder/parallels/common/step_prlctl.go | 10 +- .../common/step_type_boot_command.go | 7 +- .../common/step_upload_parallels_tools.go | 12 ++- builder/parallels/common/tools_config.go | 25 +---- builder/parallels/iso/builder.go | 91 ++++++------------- builder/parallels/iso/step_create_disk.go | 2 +- builder/parallels/iso/step_create_vm.go | 2 +- builder/parallels/iso/step_http_server.go | 2 +- 15 files changed, 80 insertions(+), 210 deletions(-) diff --git a/builder/parallels/common/floppy_config.go b/builder/parallels/common/floppy_config.go index 5bcdd4b4a..d656e103a 100644 --- a/builder/parallels/common/floppy_config.go +++ b/builder/parallels/common/floppy_config.go @@ -1,9 +1,7 @@ package common import ( - "fmt" - - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // FloppyConfig is configuration related to created floppy disks and attaching @@ -12,20 +10,10 @@ type FloppyConfig struct { FloppyFiles []string `mapstructure:"floppy_files"` } -func (c *FloppyConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *FloppyConfig) Prepare(ctx *interpolate.Context) []error { if c.FloppyFiles == nil { c.FloppyFiles = make([]string, 0) } - errs := make([]error, 0) - for i, file := range c.FloppyFiles { - var err error - c.FloppyFiles[i], err = t.Process(file, nil) - if err != nil { - errs = append(errs, fmt.Errorf( - "Error processing floppy_files[%d]: %s", i, err)) - } - } - - return errs + return nil } diff --git a/builder/parallels/common/output_config.go b/builder/parallels/common/output_config.go index 19be1ba00..f3427183c 100644 --- a/builder/parallels/common/output_config.go +++ b/builder/parallels/common/output_config.go @@ -2,33 +2,22 @@ package 
common import ( "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/template/interpolate" ) type OutputConfig struct { OutputDir string `mapstructure:"output_directory"` } -func (c *OutputConfig) Prepare(t *packer.ConfigTemplate, pc *common.PackerConfig) []error { +func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error { if c.OutputDir == "" { c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName) } - templates := map[string]*string{ - "output_directory": &c.OutputDir, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if !pc.PackerForce { if _, err := os.Stat(c.OutputDir); err == nil { errs = append(errs, fmt.Errorf( diff --git a/builder/parallels/common/prlctl_config.go b/builder/parallels/common/prlctl_config.go index eff0618b3..4748bdeb7 100644 --- a/builder/parallels/common/prlctl_config.go +++ b/builder/parallels/common/prlctl_config.go @@ -1,28 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type PrlctlConfig struct { Prlctl [][]string `mapstructure:"prlctl"` } -func (c *PrlctlConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *PrlctlConfig) Prepare(ctx *interpolate.Context) []error { if c.Prlctl == nil { c.Prlctl = make([][]string, 0) } - errs := make([]error, 0) - for i, args := range c.Prlctl { - for j, arg := range args { - if err := t.Validate(arg); err != nil { - errs = append(errs, - fmt.Errorf("Error processing prlctl[%d][%d]: %s", i, j, err)) - } - } - } - - return errs + return nil } diff --git a/builder/parallels/common/prlctl_version_config.go b/builder/parallels/common/prlctl_version_config.go index 2103d9a59..770001e0d 100644 --- a/builder/parallels/common/prlctl_version_config.go +++ b/builder/parallels/common/prlctl_version_config.go @@ -1,31 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type PrlctlVersionConfig struct { PrlctlVersionFile string `mapstructure:"prlctl_version_file"` } -func (c *PrlctlVersionConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *PrlctlVersionConfig) Prepare(ctx *interpolate.Context) []error { if c.PrlctlVersionFile == "" { c.PrlctlVersionFile = ".prlctl_version" } - templates := map[string]*string{ - "prlctl_version_file": &c.PrlctlVersionFile, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - return errs + return nil } diff --git a/builder/parallels/common/run_config.go b/builder/parallels/common/run_config.go index 755d0f1c1..c755cdafb 100644 --- a/builder/parallels/common/run_config.go +++ b/builder/parallels/common/run_config.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/packer/packer" "time" + + "github.com/mitchellh/packer/template/interpolate" ) type RunConfig struct { @@ -13,29 +14,16 @@ type RunConfig struct { BootWait time.Duration `` } -func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { if c.RawBootWait == "" { c.RawBootWait = "10s" } - 
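// The RawBootWait/BootWait pair keeps the user-facing string separate
// from the parsed value: only Prepare calls time.ParseDuration, and
// every later step reads the typed field. A caller-side illustration
// (the values are examples):
c := &RunConfig{RawBootWait: "1m30s"}
if errs := c.Prepare(nil); len(errs) > 0 {
	// "1m30s" parses fine; an invalid string such as "10x" lands here.
}
fmt.Println(c.BootWait) // 1m30s, as a time.Duration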
templates := map[string]*string{ - "boot_wait": &c.RawBootWait, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - var err error c.BootWait, err = time.ParseDuration(c.RawBootWait) if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing boot_wait: %s", err)) + return []error{fmt.Errorf("Failed parsing boot_wait: %s", err)} } - return errs + return nil } diff --git a/builder/parallels/common/shutdown_config.go b/builder/parallels/common/shutdown_config.go index 05e5fdfeb..83d2224c3 100644 --- a/builder/parallels/common/shutdown_config.go +++ b/builder/parallels/common/shutdown_config.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/packer/packer" "time" + + "github.com/mitchellh/packer/template/interpolate" ) type ShutdownConfig struct { @@ -13,25 +14,12 @@ type ShutdownConfig struct { ShutdownTimeout time.Duration `` } -func (c *ShutdownConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ShutdownConfig) Prepare(ctx *interpolate.Context) []error { if c.RawShutdownTimeout == "" { c.RawShutdownTimeout = "5m" } - templates := map[string]*string{ - "shutdown_command": &c.ShutdownCommand, - "shutdown_timeout": &c.RawShutdownTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error var err error c.ShutdownTimeout, err = time.ParseDuration(c.RawShutdownTimeout) if err != nil { diff --git a/builder/parallels/common/ssh_config.go b/builder/parallels/common/ssh_config.go index d89daa103..9f1a9506f 100644 --- a/builder/parallels/common/ssh_config.go +++ b/builder/parallels/common/ssh_config.go @@ -7,7 +7,7 @@ import ( "time" commonssh "github.com/mitchellh/packer/common/ssh" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { @@ -20,7 +20,7 @@ type SSHConfig struct { SSHWaitTimeout time.Duration } -func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { if c.SSHPort == 0 { c.SSHPort = 22 } @@ -29,22 +29,7 @@ func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { c.RawSSHWaitTimeout = "20m" } - templates := map[string]*string{ - "ssh_key_path": &c.SSHKeyPath, - "ssh_password": &c.SSHPassword, - "ssh_username": &c.SSHUser, - "ssh_wait_timeout": &c.RawSSHWaitTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if c.SSHKeyPath != "" { if _, err := os.Stat(c.SSHKeyPath); err != nil { errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) diff --git a/builder/parallels/common/step_prlctl.go b/builder/parallels/common/step_prlctl.go index 9bd7c4b73..048ab0a3f 100644 --- a/builder/parallels/common/step_prlctl.go +++ b/builder/parallels/common/step_prlctl.go @@ -2,9 +2,11 @@ package common import ( "fmt" + "strings" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "strings" + "github.com/mitchellh/packer/template/interpolate" ) type commandTemplate struct { @@ -22,7 +24,7 @@ type commandTemplate struct { // Produces: type StepPrlctl struct { Commands 
[][]string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepPrlctl) Run(state multistep.StateBag) multistep.StepAction { @@ -34,7 +36,7 @@ func (s *StepPrlctl) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Executing custom prlctl commands...") } - tplData := &commandTemplate{ + s.Ctx.Data = &commandTemplate{ Name: vmName, } @@ -44,7 +46,7 @@ func (s *StepPrlctl) Run(state multistep.StateBag) multistep.StepAction { for i, arg := range command { var err error - command[i], err = s.Tpl.Process(arg, tplData) + command[i], err = interpolate.Render(arg, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing prlctl command: %s", err) state.Put("error", err) diff --git a/builder/parallels/common/step_type_boot_command.go b/builder/parallels/common/step_type_boot_command.go index dbc224d18..1f3a3ef4c 100644 --- a/builder/parallels/common/step_type_boot_command.go +++ b/builder/parallels/common/step_type_boot_command.go @@ -10,6 +10,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const KeyLeftShift uint32 = 0xFFE1 @@ -35,7 +36,7 @@ type StepTypeBootCommand struct { BootCommand []string HostInterfaces []string VMName string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction { @@ -61,7 +62,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui.Say(fmt.Sprintf("Host IP for the Parallels machine: %s", hostIp)) - tplData := &bootCommandTemplateData{ + s.Ctx.Data = &bootCommandTemplateData{ hostIp, httpPort, s.VMName, @@ -69,7 +70,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui.Say("Typing the boot command...") for _, command := range s.BootCommand { - command, err := s.Tpl.Process(command, tplData) + command, err := interpolate.Render(command, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing boot command: %s", err) state.Put("error", err) diff --git a/builder/parallels/common/step_upload_parallels_tools.go b/builder/parallels/common/step_upload_parallels_tools.go index ae511a1e8..6afe49677 100644 --- a/builder/parallels/common/step_upload_parallels_tools.go +++ b/builder/parallels/common/step_upload_parallels_tools.go @@ -2,10 +2,12 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "os" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // This step uploads the Parallels Tools ISO to the virtual machine. 
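// StepPrlctl and the other parallels steps now hold Ctx by value
// (interpolate.Context, not a pointer), so a step assigning s.Ctx.Data
// before rendering writes only to its own copy. How the builder wires
// one up, per the builder.go hunks further down:
step := &parallelscommon.StepPrlctl{
	Commands: b.config.Prlctl,
	Ctx:      b.config.ctx, // value copy, not a shared pointer
}
_ = step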
@@ -25,7 +27,7 @@ type StepUploadParallelsTools struct { ParallelsToolsFlavor string ParallelsToolsGuestPath string ParallelsToolsMode string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepUploadParallelsTools) Run(state multistep.StateBag) multistep.StepAction { @@ -48,11 +50,11 @@ func (s *StepUploadParallelsTools) Run(state multistep.StateBag) multistep.StepA } defer f.Close() - tplData := &toolsPathTemplate{ + s.Ctx.Data = &toolsPathTemplate{ Flavor: s.ParallelsToolsFlavor, } - s.ParallelsToolsGuestPath, err = s.Tpl.Process(s.ParallelsToolsGuestPath, tplData) + s.ParallelsToolsGuestPath, err = interpolate.Render(s.ParallelsToolsGuestPath, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing Parallels Tools path: %s", err) state.Put("error", err) diff --git a/builder/parallels/common/tools_config.go b/builder/parallels/common/tools_config.go index 19993fb77..aa9361eb8 100644 --- a/builder/parallels/common/tools_config.go +++ b/builder/parallels/common/tools_config.go @@ -3,8 +3,8 @@ package common import ( "errors" "fmt" - "github.com/mitchellh/packer/packer" - "text/template" + + "github.com/mitchellh/packer/template/interpolate" ) // These are the different valid mode values for "parallels_tools_mode" which @@ -21,7 +21,7 @@ type ToolsConfig struct { ParallelsToolsMode string `mapstructure:"parallels_tools_mode"` } -func (c *ToolsConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ToolsConfig) Prepare(ctx *interpolate.Context) []error { if c.ParallelsToolsMode == "" { c.ParallelsToolsMode = ParallelsToolsModeUpload } @@ -30,24 +30,6 @@ func (c *ToolsConfig) Prepare(t *packer.ConfigTemplate) []error { c.ParallelsToolsGuestPath = "prl-tools-{{.Flavor}}.iso" } - templates := map[string]*string{ - "parallels_tools_flavor": &c.ParallelsToolsFlavor, - "parallels_tools_mode": &c.ParallelsToolsMode, - } - - var err error - errs := make([]error, 0) - for n, ptr := range templates { - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - if _, err := template.New("path").Parse(c.ParallelsToolsGuestPath); err != nil { - errs = append(errs, fmt.Errorf("parallels_tools_guest_path invalid: %s", err)) - } - validMode := false validModes := []string{ ParallelsToolsModeDisable, @@ -62,6 +44,7 @@ func (c *ToolsConfig) Prepare(t *packer.ConfigTemplate) []error { } } + var errs []error if !validMode { errs = append(errs, fmt.Errorf("parallels_tools_mode is invalid. 
Must be one of: %v", diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index e5e42e8e7..24ef3c726 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -3,22 +3,25 @@ package iso import ( "errors" "fmt" + "log" + "strings" + "github.com/mitchellh/multistep" parallelscommon "github.com/mitchellh/packer/builder/parallels/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" - "log" - "strings" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderId = "rickard-von-essen.parallels" type Builder struct { - config config + config Config runner multistep.Runner } -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` parallelscommon.FloppyConfig `mapstructure:",squash"` parallelscommon.OutputConfig `mapstructure:",squash"` @@ -48,33 +51,35 @@ type config struct { GuestOSDistribution string `mapstructure:"guest_os_distribution"` ParallelsToolsHostPath string `mapstructure:"parallels_tools_host_path"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - - md, err := common.DecodeConfig(&b.config, raws...) + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "prlctl", + "parallel_tools_guest_path", + }, + }, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - // Accumulate any errors and warnings - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(b.config.tpl)...) + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend( - errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(b.config.tpl)...) + errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(&b.config.ctx)...) 
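// packer.MultiErrorAppend tolerates both a nil *packer.MultiError and
// an empty error slice, which is what lets a Prepare chain like the one
// above stay unconditional; the builder fails once, at the end, if
// anything accumulated. The surrounding control flow, as the openstack
// builder earlier in this series spells out:
var errs *packer.MultiError
errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...)
if errs != nil && len(errs.Errors) > 0 {
	return nil, errs
}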
warnings := make([]string, 0) if b.config.DiskSize == 0 { @@ -116,42 +121,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } - // Errors - templates := map[string]*string{ - "guest_os_type": &b.config.GuestOSType, - "hard_drive_interface": &b.config.HardDriveInterface, - "http_directory": &b.config.HTTPDir, - "iso_checksum": &b.config.ISOChecksum, - "iso_checksum_type": &b.config.ISOChecksumType, - "iso_url": &b.config.RawSingleISOUrl, - "vm_name": &b.config.VMName, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, url := range b.config.ISOUrls { - var err error - b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) - } - } - - for i, command := range b.config.BootCommand { - if err := b.config.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } - if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) @@ -264,7 +233,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(parallelscommon.StepAttachFloppy), ¶llelscommon.StepPrlctl{ Commands: b.config.Prlctl, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, ¶llelscommon.StepRun{ BootWait: b.config.BootWait, @@ -274,7 +243,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BootCommand: b.config.BootCommand, HostInterfaces: b.config.HostInterfaces, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: parallelscommon.SSHAddress, @@ -288,7 +257,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ParallelsToolsFlavor: b.config.ParallelsToolsFlavor, ParallelsToolsGuestPath: b.config.ParallelsToolsGuestPath, ParallelsToolsMode: b.config.ParallelsToolsMode, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, new(common.StepProvision), ¶llelscommon.StepShutdown{ diff --git a/builder/parallels/iso/step_create_disk.go b/builder/parallels/iso/step_create_disk.go index 8e5fb63d9..8416c0cc4 100644 --- a/builder/parallels/iso/step_create_disk.go +++ b/builder/parallels/iso/step_create_disk.go @@ -13,7 +13,7 @@ import ( type stepCreateDisk struct{} func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(parallelscommon.Driver) ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) diff --git a/builder/parallels/iso/step_create_vm.go b/builder/parallels/iso/step_create_vm.go index b48b73dbb..ebe7effa8 100644 --- a/builder/parallels/iso/step_create_vm.go +++ b/builder/parallels/iso/step_create_vm.go @@ -18,7 +18,7 @@ type stepCreateVM struct { func (s *stepCreateVM) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(parallelscommon.Driver) ui := state.Get("ui").(packer.Ui) name := config.VMName diff 
--git a/builder/parallels/iso/step_http_server.go b/builder/parallels/iso/step_http_server.go index 3d86139e3..4744af278 100644 --- a/builder/parallels/iso/step_http_server.go +++ b/builder/parallels/iso/step_http_server.go @@ -25,7 +25,7 @@ type stepHTTPServer struct { } func (s *stepHTTPServer) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) var httpPort uint = 0 From b2b74431ecfa90f78996640ee0b9ee02653a3ae4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 13:51:24 -0700 Subject: [PATCH 136/956] builder/parallels/pvm: interpolation --- builder/parallels/pvm/builder.go | 6 ++-- builder/parallels/pvm/config.go | 60 ++++++++++++-------------------- 2 files changed, 25 insertions(+), 41 deletions(-) diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index 037641619..a0c7ca9cd 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -68,7 +68,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(parallelscommon.StepAttachFloppy), &parallelscommon.StepPrlctl{ Commands: b.config.Prlctl, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &parallelscommon.StepRun{ BootWait: b.config.BootWait, @@ -78,7 +78,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BootCommand: b.config.BootCommand, HostInterfaces: []string{}, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: parallelscommon.SSHAddress, @@ -92,7 +92,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ParallelsToolsFlavor: b.config.ParallelsToolsFlavor, ParallelsToolsGuestPath: b.config.ParallelsToolsGuestPath, ParallelsToolsMode: b.config.ParallelsToolsMode, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, new(common.StepProvision), &parallelscommon.StepShutdown{ diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index 83e643111..c3fab4446 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -6,7 +6,9 @@ import ( parallelscommon "github.com/mitchellh/packer/builder/parallels/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // Config is the configuration structure for the builder. @@ -26,57 +28,39 @@ type Config struct { VMName string `mapstructure:"vm_name"` ReassignMac bool `mapstructure:"reassign_mac"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - md, err := common.DecodeConfig(c, raws...) + err := config.Decode(&c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "prlctl", + "parallel_tools_guest_path", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - c.tpl.UserVars = c.PackerUserVars - if c.VMName == "" { c.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", c.PackerBuildName) } // Prepare the errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(c.tpl, &c.PackerConfig)...)
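// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] The step-side half of the
// migration: steps now carry an interpolate.Context by value instead of a
// *packer.ConfigTemplate, set Ctx.Data to a per-render struct, and call
// interpolate.Render. A minimal sketch; StepEcho and echoTemplateData are
// hypothetical stand-ins for steps like StepPrlctl above.
package example

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

// echoTemplateData mirrors per-step data structs such as
// bootCommandTemplateData elsewhere in this series.
type echoTemplateData struct {
	Name string
}

type StepEcho struct {
	Command string
	Ctx     interpolate.Context
}

func (s *StepEcho) Render(vmName string) (string, error) {
	// Attach per-render data, then expand {{ .Name }}-style placeholders.
	s.Ctx.Data = &echoTemplateData{Name: vmName}
	rendered, err := interpolate.Render(s.Command, &s.Ctx)
	if err != nil {
		return "", fmt.Errorf("Error preparing command: %s", err)
	}
	return rendered, nil
}
// ---------------------------------------------------------------------------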
- errs = packer.MultiErrorAppend(errs, c.PrlctlConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.PrlctlVersionConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ToolsConfig.Prepare(c.tpl)...) - - templates := map[string]*string{ - "source_path": &c.SourcePath, - "vm_name": &c.VMName, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, command := range c.BootCommand { - if err := c.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(&c.ctx, &c.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, c.PrlctlConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.PrlctlVersionConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ToolsConfig.Prepare(&c.ctx)...) if c.SourcePath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) From d15bc904539af5e0b5c8509620b8e938289baba7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:01:08 -0700 Subject: [PATCH 137/956] builder/virtualbox/iso: new interpolation --- builder/virtualbox/common/export_config.go | 20 +--- builder/virtualbox/common/export_opts.go | 16 +-- builder/virtualbox/common/floppy_config.go | 18 +-- builder/virtualbox/common/output_config.go | 21 +--- builder/virtualbox/common/run_config.go | 19 +-- builder/virtualbox/common/shutdown_config.go | 20 +--- builder/virtualbox/common/ssh_config.go | 21 +--- .../common/step_download_guest_additions.go | 14 ++- .../common/step_type_boot_command.go | 12 +- .../common/step_upload_guest_additions.go | 12 +- builder/virtualbox/common/step_vboxmanage.go | 10 +- .../virtualbox/common/vbox_version_config.go | 20 +--- .../virtualbox/common/vboxmanage_config.go | 17 +-- .../common/vboxmanage_post_config.go | 17 +-- builder/virtualbox/iso/builder.go | 109 ++++++------------ builder/virtualbox/iso/step_attach_iso.go | 4 +- builder/virtualbox/iso/step_create_disk.go | 2 +- builder/virtualbox/iso/step_create_vm.go | 2 +- builder/virtualbox/iso/step_http_server.go | 2 +- 19 files changed, 100 insertions(+), 256 deletions(-) diff --git a/builder/virtualbox/common/export_config.go b/builder/virtualbox/common/export_config.go index 364d86a84..62e11e607 100644 --- a/builder/virtualbox/common/export_config.go +++ b/builder/virtualbox/common/export_config.go @@ -2,32 +2,20 @@ package common import ( "errors" - "fmt" - "github.com/mitchellh/packer/packer" + + "github.com/mitchellh/packer/template/interpolate" ) type ExportConfig struct { Format string `mapstructure:"format"` } -func (c *ExportConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ExportConfig) Prepare(ctx *interpolate.Context) []error { if c.Format == "" { c.Format = "ovf" } -
templates := map[string]*string{ - "format": &c.Format, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if c.Format != "ovf" && c.Format != "ova" { errs = append(errs, errors.New("invalid format, only 'ovf' or 'ova' are allowed")) diff --git a/builder/virtualbox/common/export_opts.go b/builder/virtualbox/common/export_opts.go index 36006aec6..cd66da586 100644 --- a/builder/virtualbox/common/export_opts.go +++ b/builder/virtualbox/common/export_opts.go @@ -1,27 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type ExportOpts struct { ExportOpts []string `mapstructure:"export_opts"` } -func (c *ExportOpts) Prepare(t *packer.ConfigTemplate) []error { +func (c *ExportOpts) Prepare(ctx *interpolate.Context) []error { if c.ExportOpts == nil { c.ExportOpts = make([]string, 0) } - errs := make([]error, 0) - for i, str := range c.ExportOpts { - var err error - c.ExportOpts[i], err = t.Process(str, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", "export_opts", err)) - } - } - - return errs + return nil } diff --git a/builder/virtualbox/common/floppy_config.go b/builder/virtualbox/common/floppy_config.go index 35cd7aca4..0e3f5e669 100644 --- a/builder/virtualbox/common/floppy_config.go +++ b/builder/virtualbox/common/floppy_config.go @@ -1,9 +1,7 @@ package common import ( - "fmt" - - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // FloppyConfig is configuration related to created floppy disks and attaching @@ -12,20 +10,10 @@ type FloppyConfig struct { FloppyFiles []string `mapstructure:"floppy_files"` } -func (c *FloppyConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *FloppyConfig) Prepare(ctx *interpolate.Context) []error { if c.FloppyFiles == nil { c.FloppyFiles = make([]string, 0) } - errs := make([]error, 0) - for i, file := range c.FloppyFiles { - var err error - c.FloppyFiles[i], err = t.Process(file, nil) - if err != nil { - errs = append(errs, fmt.Errorf( - "Error processing floppy_files[%d]: %s", i, err)) - } - } - - return errs + return nil } diff --git a/builder/virtualbox/common/output_config.go b/builder/virtualbox/common/output_config.go index 19be1ba00..f3427183c 100644 --- a/builder/virtualbox/common/output_config.go +++ b/builder/virtualbox/common/output_config.go @@ -2,33 +2,22 @@ package common import ( "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/template/interpolate" ) type OutputConfig struct { OutputDir string `mapstructure:"output_directory"` } -func (c *OutputConfig) Prepare(t *packer.ConfigTemplate, pc *common.PackerConfig) []error { +func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error { if c.OutputDir == "" { c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName) } - templates := map[string]*string{ - "output_directory": &c.OutputDir, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if !pc.PackerForce { if _, err := os.Stat(c.OutputDir); err == nil { errs = append(errs, 
fmt.Errorf( diff --git a/builder/virtualbox/common/run_config.go b/builder/virtualbox/common/run_config.go index b024189a6..164e1993f 100644 --- a/builder/virtualbox/common/run_config.go +++ b/builder/virtualbox/common/run_config.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type RunConfig struct { @@ -19,7 +19,7 @@ type RunConfig struct { BootWait time.Duration `` } -func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { if c.RawBootWait == "" { c.RawBootWait = "10s" } @@ -32,20 +32,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { c.HTTPPortMax = 9000 } - templates := map[string]*string{ - "boot_wait": &c.RawBootWait, - "http_directory": &c.HTTPDir, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error var err error c.BootWait, err = time.ParseDuration(c.RawBootWait) if err != nil { diff --git a/builder/virtualbox/common/shutdown_config.go b/builder/virtualbox/common/shutdown_config.go index 05e5fdfeb..83d2224c3 100644 --- a/builder/virtualbox/common/shutdown_config.go +++ b/builder/virtualbox/common/shutdown_config.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/packer/packer" "time" + + "github.com/mitchellh/packer/template/interpolate" ) type ShutdownConfig struct { @@ -13,25 +14,12 @@ type ShutdownConfig struct { ShutdownTimeout time.Duration `` } -func (c *ShutdownConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ShutdownConfig) Prepare(ctx *interpolate.Context) []error { if c.RawShutdownTimeout == "" { c.RawShutdownTimeout = "5m" } - templates := map[string]*string{ - "shutdown_command": &c.ShutdownCommand, - "shutdown_timeout": &c.RawShutdownTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error var err error c.ShutdownTimeout, err = time.ParseDuration(c.RawShutdownTimeout) if err != nil { diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go index 908fbb7d8..ddb113215 100644 --- a/builder/virtualbox/common/ssh_config.go +++ b/builder/virtualbox/common/ssh_config.go @@ -7,7 +7,7 @@ import ( "time" commonssh "github.com/mitchellh/packer/common/ssh" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { @@ -22,7 +22,7 @@ type SSHConfig struct { SSHWaitTimeout time.Duration } -func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { if c.SSHHostPortMin == 0 { c.SSHHostPortMin = 2222 } @@ -39,22 +39,7 @@ func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { c.RawSSHWaitTimeout = "20m" } - templates := map[string]*string{ - "ssh_key_path": &c.SSHKeyPath, - "ssh_password": &c.SSHPassword, - "ssh_username": &c.SSHUser, - "ssh_wait_timeout": &c.RawSSHWaitTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if c.SSHKeyPath != "" { if _, err := 
os.Stat(c.SSHKeyPath); err != nil { errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) diff --git a/builder/virtualbox/common/step_download_guest_additions.go b/builder/virtualbox/common/step_download_guest_additions.go index 029c7d85b..5ade2457c 100644 --- a/builder/virtualbox/common/step_download_guest_additions.go +++ b/builder/virtualbox/common/step_download_guest_additions.go @@ -3,14 +3,16 @@ package common import ( "bytes" "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "io" "io/ioutil" "log" "os" "strings" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) var additionsVersionMap = map[string]string{ @@ -31,7 +33,7 @@ type StepDownloadGuestAdditions struct { GuestAdditionsMode string GuestAdditionsURL string GuestAdditionsSHA256 string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepDownloadGuestAdditions) Run(state multistep.StateBag) multistep.StepAction { @@ -67,11 +69,11 @@ func (s *StepDownloadGuestAdditions) Run(state multistep.StateBag) multistep.Ste // Use the provided source (URL or file path) or generate it url := s.GuestAdditionsURL if url != "" { - tplData := &guestAdditionsUrlTemplate{ + s.Ctx.Data = &guestAdditionsUrlTemplate{ Version: version, } - url, err = s.Tpl.Process(url, tplData) + url, err = interpolate.Render(url, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing guest additions url: %s", err) state.Put("error", err) diff --git a/builder/virtualbox/common/step_type_boot_command.go b/builder/virtualbox/common/step_type_boot_command.go index d22584951..4d0d3cfff 100644 --- a/builder/virtualbox/common/step_type_boot_command.go +++ b/builder/virtualbox/common/step_type_boot_command.go @@ -2,13 +2,15 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "strings" "time" "unicode" "unicode/utf8" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const KeyLeftShift uint32 = 0xFFE1 @@ -32,7 +34,7 @@ type bootCommandTemplateData struct { type StepTypeBootCommand struct { BootCommand []string VMName string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction { @@ -41,7 +43,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) - tplData := &bootCommandTemplateData{ + s.Ctx.Data = &bootCommandTemplateData{ "10.0.2.2", httpPort, s.VMName, @@ -49,7 +51,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui.Say("Typing the boot command...") for _, command := range s.BootCommand { - command, err := s.Tpl.Process(command, tplData) + command, err := interpolate.Render(command, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing boot command: %s", err) state.Put("error", err) diff --git a/builder/virtualbox/common/step_upload_guest_additions.go b/builder/virtualbox/common/step_upload_guest_additions.go index 04d2dc7cc..ccc054616 100644 --- a/builder/virtualbox/common/step_upload_guest_additions.go +++ b/builder/virtualbox/common/step_upload_guest_additions.go @@ -2,10 +2,12 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" 
"log" "os" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type guestAdditionsPathTemplate struct { @@ -16,7 +18,7 @@ type guestAdditionsPathTemplate struct { type StepUploadGuestAdditions struct { GuestAdditionsMode string GuestAdditionsPath string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepUploadGuestAdditions) Run(state multistep.StateBag) multistep.StepAction { @@ -45,11 +47,11 @@ func (s *StepUploadGuestAdditions) Run(state multistep.StateBag) multistep.StepA return multistep.ActionHalt } - tplData := &guestAdditionsPathTemplate{ + s.Ctx.Data = &guestAdditionsPathTemplate{ Version: version, } - s.GuestAdditionsPath, err = s.Tpl.Process(s.GuestAdditionsPath, tplData) + s.GuestAdditionsPath, err = interpolate.Render(s.GuestAdditionsPath, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing guest additions path: %s", err) state.Put("error", err) diff --git a/builder/virtualbox/common/step_vboxmanage.go b/builder/virtualbox/common/step_vboxmanage.go index beb55da03..4eae387ee 100644 --- a/builder/virtualbox/common/step_vboxmanage.go +++ b/builder/virtualbox/common/step_vboxmanage.go @@ -2,9 +2,11 @@ package common import ( "fmt" + "strings" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "strings" + "github.com/mitchellh/packer/template/interpolate" ) type commandTemplate struct { @@ -22,7 +24,7 @@ type commandTemplate struct { // Produces: type StepVBoxManage struct { Commands [][]string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepVBoxManage) Run(state multistep.StateBag) multistep.StepAction { @@ -34,7 +36,7 @@ func (s *StepVBoxManage) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Executing custom VBoxManage commands...") } - tplData := &commandTemplate{ + s.Ctx.Data = &commandTemplate{ Name: vmName, } @@ -44,7 +46,7 @@ func (s *StepVBoxManage) Run(state multistep.StateBag) multistep.StepAction { for i, arg := range command { var err error - command[i], err = s.Tpl.Process(arg, tplData) + command[i], err = interpolate.Render(arg, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing vboxmanage command: %s", err) state.Put("error", err) diff --git a/builder/virtualbox/common/vbox_version_config.go b/builder/virtualbox/common/vbox_version_config.go index ff2c14819..9f1479445 100644 --- a/builder/virtualbox/common/vbox_version_config.go +++ b/builder/virtualbox/common/vbox_version_config.go @@ -1,31 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type VBoxVersionConfig struct { VBoxVersionFile string `mapstructure:"virtualbox_version_file"` } -func (c *VBoxVersionConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *VBoxVersionConfig) Prepare(ctx *interpolate.Context) []error { if c.VBoxVersionFile == "" { c.VBoxVersionFile = ".vbox_version" } - templates := map[string]*string{ - "virtualbox_version_file": &c.VBoxVersionFile, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - return errs + return nil } diff --git a/builder/virtualbox/common/vboxmanage_config.go b/builder/virtualbox/common/vboxmanage_config.go index b864a0422..1670aee0d 100644 --- a/builder/virtualbox/common/vboxmanage_config.go +++ b/builder/virtualbox/common/vboxmanage_config.go @@ 
-1,28 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type VBoxManageConfig struct { VBoxManage [][]string `mapstructure:"vboxmanage"` } -func (c *VBoxManageConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *VBoxManageConfig) Prepare(ctx *interpolate.Context) []error { if c.VBoxManage == nil { c.VBoxManage = make([][]string, 0) } - errs := make([]error, 0) - for i, args := range c.VBoxManage { - for j, arg := range args { - if err := t.Validate(arg); err != nil { - errs = append(errs, - fmt.Errorf("Error processing vboxmanage[%d][%d]: %s", i, j, err)) - } - } - } - - return errs + return nil } diff --git a/builder/virtualbox/common/vboxmanage_post_config.go b/builder/virtualbox/common/vboxmanage_post_config.go index df683f100..570e9b1fc 100644 --- a/builder/virtualbox/common/vboxmanage_post_config.go +++ b/builder/virtualbox/common/vboxmanage_post_config.go @@ -1,28 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type VBoxManagePostConfig struct { VBoxManagePost [][]string `mapstructure:"vboxmanage_post"` } -func (c *VBoxManagePostConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *VBoxManagePostConfig) Prepare(ctx *interpolate.Context) []error { if c.VBoxManagePost == nil { c.VBoxManagePost = make([][]string, 0) } - errs := make([]error, 0) - for i, args := range c.VBoxManagePost { - for j, arg := range args { - if err := t.Validate(arg); err != nil { - errs = append(errs, - fmt.Errorf("Error processing vboxmanage_post[%d][%d]: %s", i, j, err)) - } - } - } - - return errs + return nil } diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 826d3ffe9..d095eae2d 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -11,17 +11,19 @@ import ( "github.com/mitchellh/multistep" vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderId = "mitchellh.virtualbox" type Builder struct { - config config + config Config runner multistep.Runner } -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` vboxcommon.ExportConfig `mapstructure:",squash"` vboxcommon.ExportOpts `mapstructure:",squash"` @@ -50,34 +52,39 @@ type config struct { RawSingleISOUrl string `mapstructure:"iso_url"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "guest_additions_path", + "guest_additions_url", + "vboxmanage", + "vboxmanage_post", + }, + }, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - // Accumulate any errors and warnings - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.ExportConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ExportOpts.Prepare(b.config.tpl)...) 
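// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] With interpolation handled
// centrally by config.Decode, each per-block Prepare shrinks to defaulting
// plus semantic validation and returns a plain []error; the builder folds
// them into one *packer.MultiError, as in the hunks above and below.
// DemoConfig is a hypothetical stand-in for blocks like ExportConfig.
package example

import (
	"errors"

	"github.com/mitchellh/packer/packer"
	"github.com/mitchellh/packer/template/interpolate"
)

type DemoConfig struct {
	Format string `mapstructure:"format"`
}

func (c *DemoConfig) Prepare(ctx *interpolate.Context) []error {
	if c.Format == "" {
		c.Format = "ovf"
	}
	var errs []error
	if c.Format != "ovf" && c.Format != "ova" {
		errs = append(errs, errors.New("invalid format, only 'ovf' or 'ova' are allowed"))
	}
	return errs
}

// prepareAll shows the builder-side fold: a nil *packer.MultiError grows by
// one MultiErrorAppend call per block.
func prepareAll(c *DemoConfig, ctx *interpolate.Context) error {
	var errs *packer.MultiError
	errs = packer.MultiErrorAppend(errs, c.Prepare(ctx)...)
	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// ---------------------------------------------------------------------------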
- errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(b.config.tpl)...) + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.ExportConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ExportOpts.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend( - errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.VBoxManageConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.VBoxManagePostConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.VBoxVersionConfig.Prepare(b.config.tpl)...) + errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.VBoxManageConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.VBoxManagePostConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.VBoxVersionConfig.Prepare(&b.config.ctx)...) warnings := make([]string, 0) if b.config.DiskSize == 0 { @@ -108,56 +115,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", b.config.PackerBuildName) } - // Errors - templates := map[string]*string{ - "guest_additions_mode": &b.config.GuestAdditionsMode, - "guest_additions_sha256": &b.config.GuestAdditionsSHA256, - "guest_os_type": &b.config.GuestOSType, - "hard_drive_interface": &b.config.HardDriveInterface, - "iso_checksum": &b.config.ISOChecksum, - "iso_checksum_type": &b.config.ISOChecksumType, - "iso_interface": &b.config.ISOInterface, - "iso_url": &b.config.RawSingleISOUrl, - "vm_name": &b.config.VMName, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, url := range b.config.ISOUrls { - var err error - b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) - } - } - - validates := map[string]*string{ - "guest_additions_path": &b.config.GuestAdditionsPath, - "guest_additions_url": &b.config.GuestAdditionsURL, - } - - for n, ptr := range validates { - if err := b.config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - - for i, command := range b.config.BootCommand { - if err := b.config.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } - if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, 
errors.New("hard_drive_interface can only be ide, sata, or scsi")) @@ -265,7 +222,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe GuestAdditionsMode: b.config.GuestAdditionsMode, GuestAdditionsURL: b.config.GuestAdditionsURL, GuestAdditionsSHA256: b.config.GuestAdditionsSHA256, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepDownload{ Checksum: b.config.ISOChecksum, @@ -301,7 +258,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &vboxcommon.StepVBoxManage{ Commands: b.config.VBoxManage, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &vboxcommon.StepRun{ BootWait: b.config.BootWait, @@ -310,7 +267,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vboxcommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: vboxcommon.SSHAddress, @@ -323,7 +280,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vboxcommon.StepUploadGuestAdditions{ GuestAdditionsMode: b.config.GuestAdditionsMode, GuestAdditionsPath: b.config.GuestAdditionsPath, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, new(common.StepProvision), &vboxcommon.StepShutdown{ @@ -333,7 +290,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(vboxcommon.StepRemoveDevices), &vboxcommon.StepVBoxManage{ Commands: b.config.VBoxManagePost, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &vboxcommon.StepExport{ Format: b.config.Format, diff --git a/builder/virtualbox/iso/step_attach_iso.go b/builder/virtualbox/iso/step_attach_iso.go index 7fac2ec42..dfba91b7b 100644 --- a/builder/virtualbox/iso/step_attach_iso.go +++ b/builder/virtualbox/iso/step_attach_iso.go @@ -17,7 +17,7 @@ type stepAttachISO struct { } func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(vboxcommon.Driver) isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) @@ -65,7 +65,7 @@ func (s *stepAttachISO) Cleanup(state multistep.StateBag) { return } - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(vboxcommon.Driver) vmName := state.Get("vmName").(string) diff --git a/builder/virtualbox/iso/step_create_disk.go b/builder/virtualbox/iso/step_create_disk.go index 82b849dbe..22b171f44 100644 --- a/builder/virtualbox/iso/step_create_disk.go +++ b/builder/virtualbox/iso/step_create_disk.go @@ -15,7 +15,7 @@ import ( type stepCreateDisk struct{} func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(vboxcommon.Driver) ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) diff --git a/builder/virtualbox/iso/step_create_vm.go b/builder/virtualbox/iso/step_create_vm.go index a1f42a748..adee36411 100644 --- a/builder/virtualbox/iso/step_create_vm.go +++ b/builder/virtualbox/iso/step_create_vm.go @@ -17,7 +17,7 @@ type stepCreateVM struct { } func (s *stepCreateVM) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(vboxcommon.Driver) ui := state.Get("ui").(packer.Ui) diff --git 
a/builder/virtualbox/iso/step_http_server.go b/builder/virtualbox/iso/step_http_server.go index 3d86139e3..4744af278 100644 --- a/builder/virtualbox/iso/step_http_server.go +++ b/builder/virtualbox/iso/step_http_server.go @@ -25,7 +25,7 @@ type stepHTTPServer struct { } func (s *stepHTTPServer) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) var httpPort uint = 0 From 0dc42268108b89689d0b72d437ddf7849b2b1086 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:03:56 -0700 Subject: [PATCH 138/956] builder/virtualbox/ovf: new interpolation --- builder/virtualbox/ovf/builder.go | 10 +-- builder/virtualbox/ovf/config.go | 100 +++++++++--------------------- 2 files changed, 33 insertions(+), 77 deletions(-) diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index 9e022b398..1634bd5ad 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -70,7 +70,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe GuestAdditionsMode: b.config.GuestAdditionsMode, GuestAdditionsURL: b.config.GuestAdditionsURL, GuestAdditionsSHA256: b.config.GuestAdditionsSHA256, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &StepImport{ Name: b.config.VMName, @@ -88,7 +88,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &vboxcommon.StepVBoxManage{ Commands: b.config.VBoxManage, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &vboxcommon.StepRun{ BootWait: b.config.BootWait, @@ -97,7 +97,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vboxcommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: vboxcommon.SSHAddress, @@ -110,7 +110,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vboxcommon.StepUploadGuestAdditions{ GuestAdditionsMode: b.config.GuestAdditionsMode, GuestAdditionsPath: b.config.GuestAdditionsPath, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, new(common.StepProvision), &vboxcommon.StepShutdown{ @@ -120,7 +120,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(vboxcommon.StepRemoveDevices), &vboxcommon.StepVBoxManage{ Commands: b.config.VBoxManagePost, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &vboxcommon.StepExport{ Format: b.config.Format, diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go index 885687b67..de60bcc7b 100644 --- a/builder/virtualbox/ovf/config.go +++ b/builder/virtualbox/ovf/config.go @@ -7,7 +7,9 @@ import ( vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // Config is the configuration structure for the builder. @@ -34,22 +36,27 @@ type Config struct { ImportOpts string `mapstructure:"import_opts"` ImportFlags []string `mapstructure:"import_flags"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { - c := new(Config) - md, err := common.DecodeConfig(c, raws...) 
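// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] Why guest_additions_url sits
// in the Exclude list below: its {{ .Version }} placeholder can only be
// filled once the VirtualBox version is known, so the download step
// re-renders it at run time. A sketch of that late render;
// guestAdditionsUrlTemplate mirrors the struct StepDownloadGuestAdditions
// uses earlier in this series, renderAdditionsURL is hypothetical.
package example

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

type guestAdditionsUrlTemplate struct {
	Version string
}

func renderAdditionsURL(raw, version string, ctx interpolate.Context) (string, error) {
	ctx.Data = &guestAdditionsUrlTemplate{Version: version}
	url, err := interpolate.Render(raw, &ctx)
	if err != nil {
		return "", fmt.Errorf("Error preparing guest additions url: %s", err)
	}
	return url, nil
}
// ---------------------------------------------------------------------------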
+ var c Config + err := config.Decode(&c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "guest_additions_path", + "guest_additions_url", + "vboxmanage", + "vboxmanage_post", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - c.tpl.UserVars = c.PackerUserVars - // Defaults if c.GuestAdditionsMode == "" { c.GuestAdditionsMode = "upload" @@ -63,49 +70,17 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } // Prepare the errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, c.ExportConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ExportOpts.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(c.tpl, &c.PackerConfig)...) - errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.VBoxManageConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.VBoxManagePostConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.VBoxVersionConfig.Prepare(c.tpl)...) - - templates := map[string]*string{ - "guest_additions_mode": &c.GuestAdditionsMode, - "guest_additions_sha256": &c.GuestAdditionsSHA256, - "source_path": &c.SourcePath, - "vm_name": &c.VMName, - "import_opts": &c.ImportOpts, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "import_flags": c.ImportFlags, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = c.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, c.ExportConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ExportOpts.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(&c.ctx, &c.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.VBoxManageConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.VBoxManagePostConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.VBoxVersionConfig.Prepare(&c.ctx)...) 
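// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] Keys excluded from
// decode-time rendering can still be syntax-checked up front with the
// interpolate.Validate shorthand added later in this series
// (template/interpolate/i.go), which is what replaces the removed
// tpl.Validate(command) loops. A hypothetical sketch:
package example

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

func validateBootCommand(commands []string, ctx *interpolate.Context) []error {
	var errs []error
	for i, command := range commands {
		// Parse-only check; nothing is rendered yet.
		if err := interpolate.Validate(command, ctx); err != nil {
			errs = append(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err))
		}
	}
	return errs
}
// ---------------------------------------------------------------------------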
if c.SourcePath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) @@ -116,25 +91,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } } - for i, command := range c.BootCommand { - if err := c.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } - - validates := map[string]*string{ - "guest_additions_path": &c.GuestAdditionsPath, - "guest_additions_url": &c.GuestAdditionsURL, - } - - for n, ptr := range validates { - if err := c.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - validMode := false validModes := []string{ vboxcommon.GuestAdditionsModeDisable, @@ -176,5 +132,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.ImportFlags = append(c.ImportFlags, "--options", c.ImportOpts) } - return c, warnings, nil + return &c, warnings, nil } From f5945eeb1bc88ea1f4607488441e2ef9e6578d28 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:16:28 -0700 Subject: [PATCH 139/956] vmware/iso: new interpolation --- builder/vmware/common/driver_config.go | 20 +--- builder/vmware/common/output_config.go | 21 +--- builder/vmware/common/run_config.go | 18 +-- builder/vmware/common/shutdown_config.go | 20 +--- builder/vmware/common/ssh_config.go | 22 +--- .../vmware/common/step_type_boot_command.go | 7 +- builder/vmware/common/step_upload_tools.go | 10 +- builder/vmware/common/tools_config.go | 26 +--- builder/vmware/common/vmx_config.go | 51 +------- builder/vmware/iso/builder.go | 112 +++++------------- builder/vmware/iso/driver.go | 2 +- builder/vmware/iso/driver_esx5.go | 11 +- builder/vmware/iso/step_create_disk.go | 2 +- builder/vmware/iso/step_create_vmx.go | 15 ++- builder/vmware/iso/step_remote_upload.go | 2 +- template/interpolate/i.go | 11 ++ template/interpolate/render.go | 23 ++++ 17 files changed, 118 insertions(+), 255 deletions(-) diff --git a/builder/vmware/common/driver_config.go b/builder/vmware/common/driver_config.go index c28e30de0..ecd42b0f5 100644 --- a/builder/vmware/common/driver_config.go +++ b/builder/vmware/common/driver_config.go @@ -1,17 +1,16 @@ package common import ( - "fmt" "os" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type DriverConfig struct { FusionAppPath string `mapstructure:"fusion_app_path"` } -func (c *DriverConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *DriverConfig) Prepare(ctx *interpolate.Context) []error { if c.FusionAppPath == "" { c.FusionAppPath = os.Getenv("FUSION_APP_PATH") } @@ -19,18 +18,5 @@ func (c *DriverConfig) Prepare(t *packer.ConfigTemplate) []error { c.FusionAppPath = "/Applications/VMware Fusion.app" } - templates := map[string]*string{ - "fusion_app_path": &c.FusionAppPath, - } - - var err error - errs := make([]error, 0) - for n, ptr := range templates { - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - return errs + return nil } diff --git a/builder/vmware/common/output_config.go b/builder/vmware/common/output_config.go index 19be1ba00..f3427183c 100644 --- a/builder/vmware/common/output_config.go +++ b/builder/vmware/common/output_config.go @@ -2,33 +2,22 @@ package common import ( "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" + + "github.com/mitchellh/packer/common" + 
"github.com/mitchellh/packer/template/interpolate" ) type OutputConfig struct { OutputDir string `mapstructure:"output_directory"` } -func (c *OutputConfig) Prepare(t *packer.ConfigTemplate, pc *common.PackerConfig) []error { +func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error { if c.OutputDir == "" { c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName) } - templates := map[string]*string{ - "output_directory": &c.OutputDir, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if !pc.PackerForce { if _, err := os.Stat(c.OutputDir); err == nil { errs = append(errs, fmt.Errorf( diff --git a/builder/vmware/common/run_config.go b/builder/vmware/common/run_config.go index aef49683b..2f99984b1 100644 --- a/builder/vmware/common/run_config.go +++ b/builder/vmware/common/run_config.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type RunConfig struct { @@ -22,7 +22,7 @@ type RunConfig struct { BootWait time.Duration `` } -func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { if c.RawBootWait == "" { c.RawBootWait = "10s" } @@ -43,20 +43,8 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { c.VNCPortMax = 6000 } - templates := map[string]*string{ - "boot_wait": &c.RawBootWait, - "http_directory": &c.HTTPDir, - } - + var errs []error var err error - errs := make([]error, 0) - for n, ptr := range templates { - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - if c.RawBootWait != "" { c.BootWait, err = time.ParseDuration(c.RawBootWait) if err != nil { diff --git a/builder/vmware/common/shutdown_config.go b/builder/vmware/common/shutdown_config.go index 05e5fdfeb..83d2224c3 100644 --- a/builder/vmware/common/shutdown_config.go +++ b/builder/vmware/common/shutdown_config.go @@ -2,8 +2,9 @@ package common import ( "fmt" - "github.com/mitchellh/packer/packer" "time" + + "github.com/mitchellh/packer/template/interpolate" ) type ShutdownConfig struct { @@ -13,25 +14,12 @@ type ShutdownConfig struct { ShutdownTimeout time.Duration `` } -func (c *ShutdownConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ShutdownConfig) Prepare(ctx *interpolate.Context) []error { if c.RawShutdownTimeout == "" { c.RawShutdownTimeout = "5m" } - templates := map[string]*string{ - "shutdown_command": &c.ShutdownCommand, - "shutdown_timeout": &c.RawShutdownTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error var err error c.ShutdownTimeout, err = time.ParseDuration(c.RawShutdownTimeout) if err != nil { diff --git a/builder/vmware/common/ssh_config.go b/builder/vmware/common/ssh_config.go index 75d0a4b75..1bd481d92 100644 --- a/builder/vmware/common/ssh_config.go +++ b/builder/vmware/common/ssh_config.go @@ -8,7 +8,7 @@ import ( "time" commonssh "github.com/mitchellh/packer/common/ssh" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { @@ -23,7 +23,7 @@ type SSHConfig struct { SSHWaitTimeout 
time.Duration } -func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { if c.SSHPort == 0 { c.SSHPort = 22 } @@ -32,23 +32,7 @@ func (c *SSHConfig) Prepare(t *packer.ConfigTemplate) []error { c.RawSSHWaitTimeout = "20m" } - templates := map[string]*string{ - "ssh_host": &c.SSHHost, - "ssh_key_path": &c.SSHKeyPath, - "ssh_password": &c.SSHPassword, - "ssh_username": &c.SSHUser, - "ssh_wait_timeout": &c.RawSSHWaitTimeout, - } - - errs := make([]error, 0) - for n, ptr := range templates { - var err error - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs []error if c.SSHKeyPath != "" { if _, err := os.Stat(c.SSHKeyPath); err != nil { errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index 82e8b3e17..397b515c5 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/go-vnc" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const KeyLeftShift uint32 = 0xFFE1 @@ -35,7 +36,7 @@ type bootCommandTemplateData struct { type StepTypeBootCommand struct { BootCommand []string VMName string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction { @@ -87,7 +88,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction log.Printf("Host IP for the VMware machine: %s", hostIp) - tplData := &bootCommandTemplateData{ + s.Ctx.Data = &bootCommandTemplateData{ hostIp, httpPort, s.VMName, @@ -95,7 +96,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction ui.Say("Typing the boot command over VNC...") for _, command := range s.BootCommand { - command, err := s.Tpl.Process(command, tplData) + command, err := interpolate.Render(command, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing boot command: %s", err) state.Put("error", err) diff --git a/builder/vmware/common/step_upload_tools.go b/builder/vmware/common/step_upload_tools.go index 03387a13f..aa7dd08e7 100644 --- a/builder/vmware/common/step_upload_tools.go +++ b/builder/vmware/common/step_upload_tools.go @@ -2,9 +2,11 @@ package common import ( "fmt" + "os" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "os" + "github.com/mitchellh/packer/template/interpolate" ) type toolsUploadPathTemplate struct { @@ -15,7 +17,7 @@ type StepUploadTools struct { RemoteType string ToolsUploadFlavor string ToolsUploadPath string - Tpl *packer.ConfigTemplate + Ctx interpolate.Context } func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { @@ -44,10 +46,10 @@ func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { } defer f.Close() - tplData := &toolsUploadPathTemplate{ + c.Ctx.Data = &toolsUploadPathTemplate{ Flavor: c.ToolsUploadFlavor, } - c.ToolsUploadPath, err = c.Tpl.Process(c.ToolsUploadPath, tplData) + c.ToolsUploadPath, err = interpolate.Render(c.ToolsUploadPath, &c.Ctx) if err != nil { err := fmt.Errorf("Error preparing upload path: %s", err) state.Put("error", err) diff --git a/builder/vmware/common/tools_config.go b/builder/vmware/common/tools_config.go index c50c22e6b..923e531bf 
100644 --- a/builder/vmware/common/tools_config.go +++ b/builder/vmware/common/tools_config.go @@ -1,10 +1,7 @@ package common import ( - "fmt" - "text/template" - - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type ToolsConfig struct { @@ -12,27 +9,10 @@ type ToolsConfig struct { ToolsUploadPath string `mapstructure:"tools_upload_path"` } -func (c *ToolsConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *ToolsConfig) Prepare(ctx *interpolate.Context) []error { if c.ToolsUploadPath == "" { c.ToolsUploadPath = "{{ .Flavor }}.iso" } - templates := map[string]*string{ - "tools_upload_flavor": &c.ToolsUploadFlavor, - } - - var err error - errs := make([]error, 0) - for n, ptr := range templates { - *ptr, err = t.Process(*ptr, nil) - if err != nil { - errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - if _, err := template.New("path").Parse(c.ToolsUploadPath); err != nil { - errs = append(errs, fmt.Errorf("tools_upload_path invalid: %s", err)) - } - - return errs + return nil } diff --git a/builder/vmware/common/vmx_config.go b/builder/vmware/common/vmx_config.go index 98bf70a31..aac16d1e0 100644 --- a/builder/vmware/common/vmx_config.go +++ b/builder/vmware/common/vmx_config.go @@ -1,9 +1,7 @@ package common import ( - "fmt" - - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type VMXConfig struct { @@ -11,49 +9,6 @@ type VMXConfig struct { VMXDataPost map[string]string `mapstructure:"vmx_data_post"` } -func (c *VMXConfig) Prepare(t *packer.ConfigTemplate) []error { - errs := make([]error, 0) - newVMXData := make(map[string]string) - for k, v := range c.VMXData { - var err error - k, err = t.Process(k, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing vmx_data key %s: %s", k, err)) - continue - } - - v, err = t.Process(v, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing vmx_data value '%s': %s", v, err)) - continue - } - - newVMXData[k] = v - } - c.VMXData = newVMXData - - newVMXDataPost := make(map[string]string) - for k, v := range c.VMXDataPost { - var err error - k, err = t.Process(k, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing vmx_post_data key %s: %s", k, err)) - continue - } - - v, err = t.Process(v, nil) - if err != nil { - errs = append(errs, - fmt.Errorf("Error processing vmx_post_data value '%s': %s", v, err)) - continue - } - - newVMXDataPost[k] = v - } - c.VMXDataPost = newVMXDataPost - - return errs +func (c *VMXConfig) Prepare(ctx *interpolate.Context) []error { + return nil } diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 674b308c9..c63bfdc8b 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -3,26 +3,29 @@ package iso import ( "errors" "fmt" - "github.com/mitchellh/multistep" - vmwcommon "github.com/mitchellh/packer/builder/vmware/common" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "io/ioutil" "log" "math/rand" "os" "strings" "time" + + "github.com/mitchellh/multistep" + vmwcommon "github.com/mitchellh/packer/builder/vmware/common" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderIdESX = "mitchellh.vmware-esx" type Builder struct { - config config + config Config runner multistep.Runner } -type config struct { +type Config struct 
{ common.PackerConfig `mapstructure:",squash"` vmwcommon.DriverConfig `mapstructure:",squash"` vmwcommon.OutputConfig `mapstructure:",squash"` @@ -57,31 +60,33 @@ type config struct { RawSingleISOUrl string `mapstructure:"iso_url"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - md, err := common.DecodeConfig(&b.config, raws...) + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "tools_upload_path", + }, + }, + }, raws...) if err != nil { return nil, err } - b.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, err - } - b.config.tpl.UserVars = b.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, b.config.DriverConfig.Prepare(b.config.tpl)...) + // Accumulate any errors and warnings + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.DriverConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, - b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(b.config.tpl)...) - errs = packer.MultiErrorAppend(errs, b.config.VMXConfig.Prepare(b.config.tpl)...) + b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.VMXConfig.Prepare(&b.config.ctx)...) 
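// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] interpolate.Context is
// carried by value, so a step can take a private copy, attach per-render
// Data, and render without mutating the builder-wide context; stepCreateVMX
// below does exactly this with its vmxTemplateData. A hypothetical sketch
// (vmxData and renderVMX are illustrative names):
package example

import "github.com/mitchellh/packer/template/interpolate"

type vmxData struct {
	DiskName string
}

func renderVMX(base interpolate.Context, tpl, disk string) (string, error) {
	ctx := base // value copy: base.Data is left untouched
	ctx.Data = &vmxData{DiskName: disk}
	return interpolate.Render(tpl, &ctx)
}
// ---------------------------------------------------------------------------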
warnings := make([]string, 0) if b.config.DiskName == "" { @@ -137,59 +142,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.RemotePort = 22 } - // Errors - templates := map[string]*string{ - "disk_name": &b.config.DiskName, - "guest_os_type": &b.config.GuestOSType, - "iso_checksum": &b.config.ISOChecksum, - "iso_checksum_type": &b.config.ISOChecksumType, - "iso_url": &b.config.RawSingleISOUrl, - "vm_name": &b.config.VMName, - "vmx_template_path": &b.config.VMXTemplatePath, - "remote_type": &b.config.RemoteType, - "remote_host": &b.config.RemoteHost, - "remote_datastore": &b.config.RemoteDatastore, - "remote_cache_datastore": &b.config.RemoteCacheDatastore, - "remote_cache_directory": &b.config.RemoteCacheDirectory, - "remote_user": &b.config.RemoteUser, - "remote_password": &b.config.RemotePassword, - } - - for n, ptr := range templates { - var err error - *ptr, err = b.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, url := range b.config.ISOUrls { - var err error - b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) - } - } - - for i, command := range b.config.BootCommand { - if err := b.config.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } - - for i, file := range b.config.FloppyFiles { - var err error - b.config.FloppyFiles[i], err = b.config.tpl.Process(file, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing floppy_files[%d]: %s", - i, err)) - } - } - if b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, errors.New("The iso_checksum_type must be specified.")) @@ -343,7 +295,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vmwcommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: driver.SSHAddress, @@ -355,7 +307,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe RemoteType: b.config.RemoteType, ToolsUploadFlavor: b.config.ToolsUploadFlavor, ToolsUploadPath: b.config.ToolsUploadPath, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepProvision{}, &vmwcommon.StepShutdown{ @@ -369,7 +321,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &vmwcommon.StepCleanVMX{}, &StepUploadVMX{ - RemoteType: b.config.RemoteType, + RemoteType: b.config.RemoteType, }, &vmwcommon.StepCompactDisk{ Skip: b.config.SkipCompaction, @@ -440,5 +392,5 @@ func (b *Builder) validateVMXTemplatePath() error { return err } - return b.config.tpl.Validate(string(data)) + return interpolate.Validate(string(data), &b.config.ctx) } diff --git a/builder/vmware/iso/driver.go b/builder/vmware/iso/driver.go index e98019a7e..2c6c7465d 100644 --- a/builder/vmware/iso/driver.go +++ b/builder/vmware/iso/driver.go @@ -8,7 +8,7 @@ import ( // NewDriver returns a new driver implementation for this operating // system, or an error if the driver couldn't be initialized. 
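// ---------------------------------------------------------------------------
// [Editor's aside, not part of the patch series] Steps pull shared objects
// out of the multistep state bag with concrete type assertions; once the
// unexported config type is renamed to Config, every
// state.Get("config").(*config) site in the package must change in the same
// commit or it no longer compiles, which is why the rename fans out through
// so many files here. Config below is a hypothetical stand-in.
package example

import "github.com/mitchellh/multistep"

type Config struct {
	VMName string `mapstructure:"vm_name"`
}

func vmNameFromState(state multistep.StateBag) string {
	// The assertion names the concrete type stored under "config".
	config := state.Get("config").(*Config)
	return config.VMName
}
// ---------------------------------------------------------------------------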
-func NewDriver(config *config) (vmwcommon.Driver, error) { +func NewDriver(config *Config) (vmwcommon.Driver, error) { drivers := []vmwcommon.Driver{} if config.RemoteType == "" { diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 1f9bd7a78..d81149511 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -3,13 +3,9 @@ package iso import ( "bufio" "bytes" - gossh "code.google.com/p/go.crypto/ssh" "encoding/csv" "errors" "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/communicator/ssh" - "github.com/mitchellh/packer/packer" "io" "log" "net" @@ -17,6 +13,11 @@ import ( "path/filepath" "strings" "time" + + gossh "code.google.com/p/go.crypto/ssh" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/communicator/ssh" + "github.com/mitchellh/packer/packer" ) // ESX5 driver talks to an ESXi5 hypervisor remotely over SSH to build @@ -218,7 +219,7 @@ func (d *ESX5Driver) VNCAddress(portMin, portMax uint) (string, uint, error) { } func (d *ESX5Driver) SSHAddress(state multistep.StateBag) (string, error) { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) if address, ok := state.GetOk("vm_address"); ok { return address.(string), nil diff --git a/builder/vmware/iso/step_create_disk.go b/builder/vmware/iso/step_create_disk.go index cade63f75..b357cad01 100644 --- a/builder/vmware/iso/step_create_disk.go +++ b/builder/vmware/iso/step_create_disk.go @@ -20,7 +20,7 @@ import ( type stepCreateDisk struct{} func (stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) driver := state.Get("driver").(vmwcommon.Driver) ui := state.Get("ui").(packer.Ui) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index e490e639b..5a1a829b5 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -2,12 +2,14 @@ package iso import ( "fmt" - "github.com/mitchellh/multistep" - vmwcommon "github.com/mitchellh/packer/builder/vmware/common" - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "path/filepath" + + "github.com/mitchellh/multistep" + vmwcommon "github.com/mitchellh/packer/builder/vmware/common" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type vmxTemplateData struct { @@ -32,13 +34,14 @@ type stepCreateVMX struct { } func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*config) + config := state.Get("config").(*Config) isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) ui.Say("Building and writing VMX file") - tplData := &vmxTemplateData{ + ctx := config.ctx + ctx.Data = &vmxTemplateData{ Name: config.VMName, GuestOS: config.GuestOSType, DiskName: config.DiskName, @@ -68,7 +71,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { vmxTemplate = string(rawBytes) } - vmxContents, err := config.tpl.Process(vmxTemplate, tplData) + vmxContents, err := interpolate.Render(vmxTemplate, &ctx) if err != nil { err := fmt.Errorf("Error procesing VMX template: %s", err) state.Put("error", err) diff --git a/builder/vmware/iso/step_remote_upload.go b/builder/vmware/iso/step_remote_upload.go index 927c16c2a..1fc66a541 100644 --- a/builder/vmware/iso/step_remote_upload.go +++ b/builder/vmware/iso/step_remote_upload.go @@ -29,7 +29,7 @@ func (s 
*stepRemoteUpload) Run(state multistep.StateBag) multistep.StepAction {
 		return multistep.ActionContinue
 	}
 
-	config := state.Get("config").(*config)
+	config := state.Get("config").(*Config)
 	checksum := config.ISOChecksum
 	checksumType := config.ISOChecksumType
 
diff --git a/template/interpolate/i.go b/template/interpolate/i.go
index fa3a31395..c0cd13f85 100644
--- a/template/interpolate/i.go
+++ b/template/interpolate/i.go
@@ -27,6 +27,11 @@ func Render(v string, ctx *Context) (string, error) {
 	return (&I{Value: v}).Render(ctx)
 }
 
+// Validate is shorthand for constructing an I and calling Validate.
+func Validate(v string, ctx *Context) error {
+	return (&I{Value: v}).Validate(ctx)
+}
+
 // I stands for "interpolation" and is the main interpolation struct
 // in order to render values.
 type I struct {
@@ -52,6 +57,12 @@ func (i *I) Render(ctx *Context) (string, error) {
 	return result.String(), nil
 }
 
+// Validate validates that the template is syntactically valid.
+func (i *I) Validate(ctx *Context) error {
+	_, err := i.template(ctx)
+	return err
+}
+
 func (i *I) template(ctx *Context) (*template.Template, error) {
 	return template.New("root").Funcs(Funcs(ctx)).Parse(i.Value)
 }
diff --git a/template/interpolate/render.go b/template/interpolate/render.go
index a4c33222d..2120c171f 100644
--- a/template/interpolate/render.go
+++ b/template/interpolate/render.go
@@ -34,6 +34,11 @@ func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interfa
 
 	// Now go through each value and render it
 	for k, raw := range m {
+		// Always validate every field
+		if err := ValidateInterface(raw, ctx); err != nil {
+			return nil, fmt.Errorf("invalid '%s': %s", k, err)
+		}
+
 		if !f.include(k) {
 			continue
 		}
@@ -70,6 +75,24 @@ func RenderInterface(v interface{}, ctx *Context) (interface{}, error) {
 	return v, nil
 }
 
+// ValidateInterface walks an arbitrary value and validates every
+// template string found within it.
+func ValidateInterface(v interface{}, ctx *Context) error {
+	f := func(v string) (string, error) {
+		return v, Validate(v, ctx)
+	}
+
+	walker := &renderWalker{
+		F:       f,
+		Replace: false,
+	}
+	err := reflectwalk.Walk(v, walker)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // Include checks whether a key should be included.
func (f *RenderFilter) include(k string) bool { if f == nil { From c3b75f4b8646edb3b69646802b2241284c301c0d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:21:15 -0700 Subject: [PATCH 140/956] vmware/vmx: interpolation --- builder/vmware/vmx/builder.go | 4 +-- builder/vmware/vmx/config.go | 68 +++++++++++------------------------ 2 files changed, 22 insertions(+), 50 deletions(-) diff --git a/builder/vmware/vmx/builder.go b/builder/vmware/vmx/builder.go index 4597e647b..2a8d7eef9 100644 --- a/builder/vmware/vmx/builder.go +++ b/builder/vmware/vmx/builder.go @@ -88,7 +88,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vmwcommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, VMName: b.config.VMName, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepConnectSSH{ SSHAddress: driver.SSHAddress, @@ -100,7 +100,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe RemoteType: b.config.RemoteType, ToolsUploadFlavor: b.config.ToolsUploadFlavor, ToolsUploadPath: b.config.ToolsUploadPath, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, &common.StepProvision{}, &vmwcommon.StepShutdown{ diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go index 575bc7a5b..bf21f2e54 100644 --- a/builder/vmware/vmx/config.go +++ b/builder/vmware/vmx/config.go @@ -6,7 +6,9 @@ import ( vmwcommon "github.com/mitchellh/packer/builder/vmware/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) // Config is the configuration structure for the builder. @@ -27,68 +29,38 @@ type Config struct { SourcePath string `mapstructure:"source_path"` VMName string `mapstructure:"vm_name"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - md, err := common.DecodeConfig(c, raws...) + err := config.Decode(c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "boot_command", + "tools_upload_path", + }, + }, + }, raws...) if err != nil { return nil, nil, err } - c.tpl, err = packer.NewConfigTemplate() - if err != nil { - return nil, nil, err - } - c.tpl.UserVars = c.PackerUserVars - // Defaults if c.VMName == "" { c.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", c.PackerBuildName) } // Prepare the errors - errs := common.CheckUnusedConfig(md) - errs = packer.MultiErrorAppend(errs, c.DriverConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(c.tpl, &c.PackerConfig)...) - errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.ToolsConfig.Prepare(c.tpl)...) - errs = packer.MultiErrorAppend(errs, c.VMXConfig.Prepare(c.tpl)...) 
- - templates := map[string]*string{ - "remote_type": &c.RemoteType, - "source_path": &c.SourcePath, - "vm_name": &c.VMName, - } - - for n, ptr := range templates { - var err error - *ptr, err = c.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - for i, file := range c.FloppyFiles { - var err error - c.FloppyFiles[i], err = c.tpl.Process(file, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing floppy_files[%d]: %s", - i, err)) - } - } - - for i, command := range c.BootCommand { - if err := c.tpl.Validate(command); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) - } - } + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, c.DriverConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(&c.ctx, &c.PackerConfig)...) + errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.ToolsConfig.Prepare(&c.ctx)...) + errs = packer.MultiErrorAppend(errs, c.VMXConfig.Prepare(&c.ctx)...) if c.SourcePath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) From cbe6e83b60f56841b5286357ce9bb144e9a2c073 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:27:11 -0700 Subject: [PATCH 141/956] post-processor/docker-*: fix compilation errors --- .../docker-import/post-processor.go | 45 +++++------------- post-processor/docker-push/post-processor.go | 46 +++++-------------- post-processor/docker-save/post-processor.go | 46 +++++-------------- post-processor/docker-tag/post-processor.go | 45 +++++------------- 4 files changed, 46 insertions(+), 136 deletions(-) diff --git a/post-processor/docker-import/post-processor.go b/post-processor/docker-import/post-processor.go index 78543a754..cb0e4ec7a 100644 --- a/post-processor/docker-import/post-processor.go +++ b/post-processor/docker-import/post-processor.go @@ -2,9 +2,12 @@ package dockerimport import ( "fmt" + "github.com/mitchellh/packer/builder/docker" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderId = "packer.post-processor.docker-import" @@ -15,7 +18,7 @@ type Config struct { Repository string `mapstructure:"repository"` Tag string `mapstructure:"tag"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -23,42 +26,16 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
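	// With an empty Exclude list, every string field is rendered at decode
	// time, so user variables are resolved before PostProcess ever runs.
	// A minimal sketch of what that rendering does (the template string
	// and variable value are made-up examples):
	//
	//	ctx := interpolate.Context{
	//		UserVariables: map[string]string{"version": "1.0"},
	//	}
	//	repo, err := interpolate.Render("myrepo/app-{{user `version`}}", &ctx)
	//	// repo == "myrepo/app-1.0" on success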
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := new(packer.MultiError) - - templates := map[string]*string{ - "repository": &p.config.Repository, - "tag": &p.config.Tag, - } - - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs - } - return nil } @@ -76,7 +53,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac importRepo += ":" + p.config.Tag } - driver := &docker.DockerDriver{Tpl: p.config.tpl, Ui: ui} + driver := &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui} ui.Message("Importing image: " + artifact.Id()) ui.Message("Repository: " + importRepo) diff --git a/post-processor/docker-push/post-processor.go b/post-processor/docker-push/post-processor.go index 25b41afee..7fd2e4a32 100644 --- a/post-processor/docker-push/post-processor.go +++ b/post-processor/docker-push/post-processor.go @@ -2,12 +2,15 @@ package dockerpush import ( "fmt" + "strings" + "github.com/mitchellh/packer/builder/docker" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/post-processor/docker-import" "github.com/mitchellh/packer/post-processor/docker-tag" - "strings" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -19,7 +22,7 @@ type Config struct { LoginPassword string `mapstructure:"login_password"` LoginServer string `mapstructure:"login_server"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -29,41 +32,16 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := new(packer.MultiError) - - // Process templates - templates := map[string]*string{ - "login_email": &p.config.LoginEmail, - "login_username": &p.config.LoginUsername, - "login_password": &p.config.LoginPassword, - "login_server": &p.config.LoginServer, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - if len(errs.Errors) > 0 { - return errs - } - return nil } @@ -79,7 +57,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac driver := p.Driver if driver == nil { // If no driver is set, then we use the real driver - driver = &docker.DockerDriver{Tpl: p.config.tpl, Ui: ui} + driver = &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui} } if p.config.Login { diff --git a/post-processor/docker-save/post-processor.go b/post-processor/docker-save/post-processor.go index 6a2d86298..f35b0053e 100644 --- a/post-processor/docker-save/post-processor.go +++ b/post-processor/docker-save/post-processor.go @@ -2,11 +2,14 @@ package dockersave import ( "fmt" + "os" + "github.com/mitchellh/packer/builder/docker" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/post-processor/docker-import" - "os" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderId = "packer.post-processor.docker-save" @@ -16,7 +19,7 @@ type Config struct { Path string `mapstructure:"path"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -26,41 +29,16 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := new(packer.MultiError) - - templates := map[string]*string{ - "path": &p.config.Path, - } - - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs - } - return nil } @@ -85,7 +63,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac driver := p.Driver if driver == nil { // If no driver is set, then we use the real driver - driver = &docker.DockerDriver{Tpl: p.config.tpl, Ui: ui} + driver = &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui} } ui.Message("Saving image: " + artifact.Id()) diff --git a/post-processor/docker-tag/post-processor.go b/post-processor/docker-tag/post-processor.go index d68b48e4c..64d3e0479 100644 --- a/post-processor/docker-tag/post-processor.go +++ b/post-processor/docker-tag/post-processor.go @@ -2,10 +2,13 @@ package dockertag import ( "fmt" + "github.com/mitchellh/packer/builder/docker" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/post-processor/docker-import" + "github.com/mitchellh/packer/template/interpolate" ) const BuilderId = "packer.post-processor.docker-tag" @@ -16,7 +19,7 @@ type Config struct { Repository string `mapstructure:"repository"` Tag string `mapstructure:"tag"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -26,42 +29,16 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := new(packer.MultiError) - - templates := map[string]*string{ - "repository": &p.config.Repository, - "tag": &p.config.Tag, - } - - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs - } - return nil } @@ -77,7 +54,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac driver := p.Driver if driver == nil { // If no driver is set, then we use the real driver - driver = &docker.DockerDriver{Tpl: p.config.tpl, Ui: ui} + driver = &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui} } importRepo := p.config.Repository From 4bb16ac22361efeb96adf8ecadcbab83862fb1ae Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:30:59 -0700 Subject: [PATCH 142/956] fix failing tests --- builder/parallels/common/config_test.go | 12 ++++-------- .../parallels/common/step_type_boot_command_test.go | 3 +-- builder/parallels/common/tools_config_test.go | 8 -------- builder/virtualbox/common/config_test.go | 12 ++++-------- builder/vmware/common/config_test.go | 12 ++++-------- 5 files changed, 13 insertions(+), 34 deletions(-) diff --git a/builder/parallels/common/config_test.go b/builder/parallels/common/config_test.go index a84c51bc1..eeeda864a 100644 --- a/builder/parallels/common/config_test.go +++ b/builder/parallels/common/config_test.go @@ -1,15 +1,11 @@ package common import ( - "github.com/mitchellh/packer/packer" "testing" + + "github.com/mitchellh/packer/template/interpolate" ) -func testConfigTemplate(t *testing.T) *packer.ConfigTemplate { - result, err := packer.NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - return result +func testConfigTemplate(t *testing.T) *interpolate.Context { + return &interpolate.Context{} } diff --git a/builder/parallels/common/step_type_boot_command_test.go b/builder/parallels/common/step_type_boot_command_test.go index 6dbbe9e1c..2570e7fbe 100644 --- a/builder/parallels/common/step_type_boot_command_test.go +++ b/builder/parallels/common/step_type_boot_command_test.go @@ -9,7 +9,6 @@ import ( func TestStepTypeBootCommand(t *testing.T) { state := testState(t) - tpl, _ := packer.NewConfigTemplate() var bootcommand = []string{ "1234567890-=", @@ -27,7 +26,7 @@ func TestStepTypeBootCommand(t *testing.T) { BootCommand: bootcommand, HostInterfaces: []string{}, VMName: "myVM", - Tpl: tpl, + Ctx: *testConfigTemplate(t), } comm := new(packer.MockCommunicator) diff --git a/builder/parallels/common/tools_config_test.go b/builder/parallels/common/tools_config_test.go index 5f5a8a35d..8755ec090 100644 --- a/builder/parallels/common/tools_config_test.go +++ b/builder/parallels/common/tools_config_test.go @@ -70,14 +70,6 @@ func TestToolsConfigPrepare_ParallelsToolsGuestPath(t *testing.T) { t.Fatal("should not be empty") } - // Test with a bad value - c = testToolsConfig() - c.ParallelsToolsGuestPath = "{{{nope}" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) == 0 { - t.Fatal("should have error") - } - // Test with a good one c = testToolsConfig() c.ParallelsToolsGuestPath = "foo" diff --git 
a/builder/virtualbox/common/config_test.go b/builder/virtualbox/common/config_test.go index a84c51bc1..eeeda864a 100644 --- a/builder/virtualbox/common/config_test.go +++ b/builder/virtualbox/common/config_test.go @@ -1,15 +1,11 @@ package common import ( - "github.com/mitchellh/packer/packer" "testing" + + "github.com/mitchellh/packer/template/interpolate" ) -func testConfigTemplate(t *testing.T) *packer.ConfigTemplate { - result, err := packer.NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - return result +func testConfigTemplate(t *testing.T) *interpolate.Context { + return &interpolate.Context{} } diff --git a/builder/vmware/common/config_test.go b/builder/vmware/common/config_test.go index a84c51bc1..eeeda864a 100644 --- a/builder/vmware/common/config_test.go +++ b/builder/vmware/common/config_test.go @@ -1,15 +1,11 @@ package common import ( - "github.com/mitchellh/packer/packer" "testing" + + "github.com/mitchellh/packer/template/interpolate" ) -func testConfigTemplate(t *testing.T) *packer.ConfigTemplate { - result, err := packer.NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - return result +func testConfigTemplate(t *testing.T) *interpolate.Context { + return &interpolate.Context{} } From 5b343ca98cd49056c50bc09ffa65845155f00dc2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:36:15 -0700 Subject: [PATCH 143/956] post-processor/vagrant: interpolation --- post-processor/vagrant/post-processor.go | 74 ++++++++++-------------- 1 file changed, 29 insertions(+), 45 deletions(-) diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 50b80f7cf..63200272b 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -11,8 +11,11 @@ import ( "path/filepath" "text/template" + "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) var builtins = map[string]string{ @@ -35,7 +38,7 @@ type Config struct { Override map[string]interface{} VagrantfileTemplate string `mapstructure:"vagrantfile_template"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -73,10 +76,12 @@ func (p *PostProcessor) PostProcessProvider(name string, provider Provider, ui p ui.Say(fmt.Sprintf("Creating Vagrant box for '%s' provider", name)) - outputPath, err := config.tpl.Process(config.OutputPath, &outputPathTemplate{ - ArtifactId: artifact.Id(), - BuildName: config.PackerBuildName, - Provider: name, + outputPath, err := interpolate.Render(config.OutputPath, &interpolate.Context{ + Data: &outputPathTemplate{ + ArtifactId: artifact.Id(), + BuildName: config.PackerBuildName, + Provider: name, + }, }) if err != nil { return nil, false, err @@ -162,21 +167,24 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return p.PostProcessProvider(name, provider, ui, artifact) } -func (p *PostProcessor) configureSingle(config *Config, raws ...interface{}) error { - md, err := common.DecodeConfig(config, raws...) +func (p *PostProcessor) configureSingle(c *Config, raws ...interface{}) error { + var md mapstructure.Metadata + err := config.Decode(c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "output", + }, + }, + }, raws...) 
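	// "output" is excluded above because it is rendered per provider with
	// box-specific data, as PostProcessProvider does earlier in this patch.
	// A condensed sketch of that call (example values are made up):
	//
	//	outputPath, err := interpolate.Render(c.OutputPath, &interpolate.Context{
	//		Data: &outputPathTemplate{
	//			ArtifactId: "abc123",
	//			BuildName:  "example",
	//			Provider:   "virtualbox",
	//		},
	//	})
	//	// the default template yields "packer_example_virtualbox.box"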
if err != nil { return err } - config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - config.tpl.UserVars = config.PackerUserVars - // Defaults - if config.OutputPath == "" { - config.OutputPath = "packer_{{ .BuildName }}_{{.Provider}}.box" + if c.OutputPath == "" { + c.OutputPath = "packer_{{ .BuildName }}_{{.Provider}}.box" } found := false @@ -188,39 +196,15 @@ func (p *PostProcessor) configureSingle(config *Config, raws ...interface{}) err } if !found { - config.CompressionLevel = flate.DefaultCompression + c.CompressionLevel = flate.DefaultCompression } - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "vagrantfile_template": &config.VagrantfileTemplate, - } - - for key, ptr := range templates { - *ptr, err = config.tpl.Process(*ptr, nil) + var errs *packer.MultiError + if c.VagrantfileTemplate != "" { + _, err := os.Stat(c.VagrantfileTemplate) if err != nil { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - validates := map[string]*string{ - "output": &config.OutputPath, - "vagrantfile_template": &config.VagrantfileTemplate, - } - - if config.VagrantfileTemplate != "" { - _, err := os.Stat(config.VagrantfileTemplate) - if err != nil { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("vagrantfile_template '%s' does not exist", config.VagrantfileTemplate)) - } - } - - for n, ptr := range validates { - if err := config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) + errs = packer.MultiErrorAppend(errs, fmt.Errorf( + "vagrantfile_template '%s' does not exist", c.VagrantfileTemplate)) } } From c2381be44fb5e8cdecf110e549b36ebe35a9cda0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:41:47 -0700 Subject: [PATCH 144/956] provisioner/*: convert to interpolation --- provisioner/ansible-local/provisioner.go | 59 +++---------- provisioner/chef-client/provisioner.go | 100 +++++++---------------- 2 files changed, 39 insertions(+), 120 deletions(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index 3ab37c086..1d18ffc49 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -8,14 +8,16 @@ import ( "strings" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const DefaultStagingDir = "/tmp/packer-provisioner-ansible-local" type Config struct { common.PackerConfig `mapstructure:",squash"` - tpl *packer.ConfigTemplate + ctx interpolate.Context // The command to run ansible Command string @@ -54,21 +56,16 @@ type Provisioner struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
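	// packer.MultiErrorAppend accepts a nil *packer.MultiError, which is
	// why the converted Prepare methods can start from a nil pointer
	// instead of seeding errs with common.CheckUnusedConfig. Minimal sketch:
	//
	//	var errs *packer.MultiError
	//	errs = packer.MultiErrorAppend(errs, fmt.Errorf("first problem"))
	//	if errs != nil && len(errs.Errors) > 0 {
	//		return errs
	//	}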
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - // Defaults if p.config.Command == "" { p.config.Command = "ANSIBLE_FORCE_COLOR=1 PYTHONUNBUFFERED=1 ansible-playbook" @@ -78,44 +75,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = DefaultStagingDir } - // Templates - templates := map[string]*string{ - "command": &p.config.Command, - "group_vars": &p.config.GroupVars, - "host_vars": &p.config.HostVars, - "playbook_file": &p.config.PlaybookFile, - "playbook_dir": &p.config.PlaybookDir, - "staging_dir": &p.config.StagingDir, - "inventory_file": &p.config.InventoryFile, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "extra_arguments": p.config.ExtraArguments, - "playbook_paths": p.config.PlaybookPaths, - "role_paths": p.config.RolePaths, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = p.config.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - // Validation + var errs *packer.MultiError err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index b3d91b3e4..ea41e954c 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -15,7 +15,9 @@ import ( "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -38,7 +40,7 @@ type Config struct { ValidationKeyPath string `mapstructure:"validation_key_path"` ValidationClientName string `mapstructure:"validation_client_name"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Provisioner struct { @@ -65,42 +67,19 @@ type InstallChefTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + "install_command", + }, + }, + }, raws...) 
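	// execute_command and install_command stay excluded because they can
	// only be rendered once their template data exists. Later in this
	// patch, executeChef does exactly that (the paths here are made-up
	// example values):
	//
	//	p.config.ctx.Data = &ExecuteTemplate{
	//		ConfigPath: "/tmp/client.rb",
	//		JsonPath:   "/tmp/first-boot.json",
	//		Sudo:       true,
	//	}
	//	command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)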
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "chef_environment": &p.config.ChefEnvironment, - "ssl_verify_mode": &p.config.SslVerifyMode, - "config_template": &p.config.ConfigTemplate, - "node_name": &p.config.NodeName, - "staging_dir": &p.config.StagingDir, - "chef_server_url": &p.config.ServerUrl, - "execute_command": &p.config.ExecuteCommand, - "install_command": &p.config.InstallCommand, - "validation_key_path": &p.config.ValidationKeyPath, - "validation_client_name": &p.config.ValidationClientName, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-client " + "--no-color -c {{.ConfigPath}} -j {{.JsonPath}}" @@ -120,33 +99,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-chef-client" } - sliceTemplates := map[string][]string{ - "run_list": p.config.RunList, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = p.config.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - - validates := map[string]*string{ - "execute_command": &p.config.ExecuteCommand, - "install_command": &p.config.InstallCommand, - } - - for n, ptr := range validates { - if err := p.config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - + var errs *packer.MultiError if p.config.ConfigTemplate != "" { fi, err := os.Stat(p.config.ConfigTemplate) if err != nil { @@ -291,14 +244,16 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN tpl = string(tplBytes) } - configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{ + ctx := p.config.ctx + ctx.Data = &ConfigTemplate{ NodeName: nodeName, ServerUrl: serverUrl, ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, SslVerifyMode: sslVerifyMode, - }) + } + configString, err := interpolate.Render(tpl, &ctx) if err != nil { return "", err } @@ -415,11 +370,12 @@ func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir stri } func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error { - command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteTemplate{ + p.config.ctx.Data = &ExecuteTemplate{ ConfigPath: config, JsonPath: json, Sudo: !p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return err } @@ -444,9 +400,10 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error { ui.Message("Installing Chef...") - command, err := p.config.tpl.Process(p.config.InstallCommand, &InstallChefTemplate{ + p.config.ctx.Data = &InstallChefTemplate{ Sudo: !p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx) if err != nil { return err } 
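The hunk below escapes user variables before interpolating them into the
node's JSON; without it, a quote in a variable value would produce invalid
JSON. A minimal sketch of the escaping (the input string is a made-up
example):

	v := `say "hi"`
	v = strings.Replace(v, `\`, `\\`, -1)
	v = strings.Replace(v, `"`, `\"`, -1)
	// v == `say \"hi\"`, safe to embed inside a JSON string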
@@ -532,24 +489,25 @@ func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) { // Copy the user variables so that we can restore them later, and // make sure we make the quotes JSON-friendly in the user variables. originalUserVars := make(map[string]string) - for k, v := range p.config.tpl.UserVars { + for k, v := range p.config.ctx.UserVariables { originalUserVars[k] = v } // Make sure we reset them no matter what defer func() { - p.config.tpl.UserVars = originalUserVars + p.config.ctx.UserVariables = originalUserVars }() // Make the current user variables JSON string safe. - for k, v := range p.config.tpl.UserVars { + for k, v := range p.config.ctx.UserVariables { v = strings.Replace(v, `\`, `\\`, -1) v = strings.Replace(v, `"`, `\"`, -1) - p.config.tpl.UserVars[k] = v + p.config.ctx.UserVariables[k] = v } // Process the bytes with the template processor - jsonBytesProcessed, err := p.config.tpl.Process(string(jsonBytes), nil) + p.config.ctx.Data = nil + jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx) if err != nil { return nil, err } From 2b4df93f2fbaeddb284584e1a1a2c83f2b386b93 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:50:20 -0700 Subject: [PATCH 145/956] provisioner/*: interpolation --- provisioner/chef-solo/provisioner.go | 103 ++++++------------- provisioner/file/provisioner.go | 44 +++----- provisioner/puppet-masterless/provisioner.go | 97 ++++------------- provisioner/puppet-server/provisioner.go | 69 +++---------- provisioner/salt-masterless/provisioner.go | 42 +++----- provisioner/shell/provisioner.go | 70 ++++--------- 6 files changed, 113 insertions(+), 312 deletions(-) diff --git a/provisioner/chef-solo/provisioner.go b/provisioner/chef-solo/provisioner.go index 3ee0e74c7..686dc250e 100644 --- a/provisioner/chef-solo/provisioner.go +++ b/provisioner/chef-solo/provisioner.go @@ -7,12 +7,15 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "path/filepath" "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -34,7 +37,7 @@ type Config struct { SkipInstall bool `mapstructure:"skip_install"` StagingDir string `mapstructure:"staging_directory"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Provisioner struct { @@ -69,17 +72,19 @@ type InstallChefTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + "install_command", + }, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-solo --no-color -c {{.ConfigPath}} -j {{.JsonPath}}" } @@ -96,57 +101,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-chef-solo" } - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "config_template": &p.config.ConfigTemplate, - "data_bags_path": &p.config.DataBagsPath, - "encrypted_data_bag_secret": &p.config.EncryptedDataBagSecretPath, - "roles_path": &p.config.RolesPath, - "staging_dir": &p.config.StagingDir, - "environments_path": &p.config.EnvironmentsPath, - "chef_environment": &p.config.ChefEnvironment, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "cookbook_paths": p.config.CookbookPaths, - "remote_cookbook_paths": p.config.RemoteCookbookPaths, - "run_list": p.config.RunList, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = p.config.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - - validates := map[string]*string{ - "execute_command": &p.config.ExecuteCommand, - "install_command": &p.config.InstallCommand, - } - - for n, ptr := range validates { - if err := p.config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - + var errs *packer.MultiError if p.config.ConfigTemplate != "" { fi, err := os.Stat(p.config.ConfigTemplate) if err != nil { @@ -362,7 +317,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, local tpl = string(tplBytes) } - configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{ + p.config.ctx.Data = &ConfigTemplate{ CookbookPaths: strings.Join(cookbook_paths, ","), RolesPath: rolesPath, DataBagsPath: dataBagsPath, @@ -373,7 +328,8 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, local HasEncryptedDataBagSecretPath: encryptedDataBagSecretPath != "", HasEnvironmentsPath: environmentsPath != "", ChefEnvironment: chefEnvironment, - }) + } + configString, err := interpolate.Render(tpl, &p.config.ctx) if err != nil { return "", err } @@ -433,11 +389,12 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri } func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error { - command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteTemplate{ + p.config.ctx.Data = &ExecuteTemplate{ ConfigPath: config, JsonPath: json, Sudo: !p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return err } @@ -462,9 +419,10 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error { ui.Message("Installing Chef...") - command, err := p.config.tpl.Process(p.config.InstallCommand, &InstallChefTemplate{ + p.config.ctx.Data = &InstallChefTemplate{ Sudo: 
!p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx) if err != nil { return err } @@ -533,24 +491,25 @@ func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) { // Copy the user variables so that we can restore them later, and // make sure we make the quotes JSON-friendly in the user variables. originalUserVars := make(map[string]string) - for k, v := range p.config.tpl.UserVars { + for k, v := range p.config.ctx.UserVariables { originalUserVars[k] = v } // Make sure we reset them no matter what defer func() { - p.config.tpl.UserVars = originalUserVars + p.config.ctx.UserVariables = originalUserVars }() // Make the current user variables JSON string safe. - for k, v := range p.config.tpl.UserVars { + for k, v := range p.config.ctx.UserVariables { v = strings.Replace(v, `\`, `\\`, -1) v = strings.Replace(v, `"`, `\"`, -1) - p.config.tpl.UserVars[k] = v + p.config.ctx.UserVariables[k] = v } // Process the bytes with the template processor - jsonBytesProcessed, err := p.config.tpl.Process(string(jsonBytes), nil) + p.config.ctx.Data = nil + jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx) if err != nil { return nil, err } diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index 1cb69acd4..ce359a407 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -3,12 +3,15 @@ package file import ( "errors" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` // The local path of the file to upload. @@ -17,42 +20,25 @@ type config struct { // The remote path where the local file will be uploaded to. Destination string - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Provisioner struct { - config config + config Config } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "source": &p.config.Source, - "destination": &p.config.Destination, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - + var errs *packer.MultiError if _, err := os.Stat(p.config.Source); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index efb5e53e2..eb364da58 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -5,16 +5,19 @@ package puppetmasterless import ( "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" "path/filepath" "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { common.PackerConfig `mapstructure:",squash"` - tpl *packer.ConfigTemplate + ctx interpolate.Context // The command used to execute Puppet. ExecuteCommand string `mapstructure:"execute_command"` @@ -57,20 +60,18 @@ type ExecuteTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) 
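	// The hand-rolled facter template loop removed below becomes
	// unnecessary: decode-time interpolation walks nested values, so the
	// Facter map is already rendered. Roughly the equivalent call, assuming
	// RenderInterface behaves as defined in template/interpolate/render.go
	// earlier in this series:
	//
	//	rendered, err := interpolate.RenderInterface(p.config.Facter, &p.config.ctx)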
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - // Set some defaults if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + @@ -85,71 +86,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-puppet-masterless" } - // Templates - templates := map[string]*string{ - "hiera_config_path": &p.config.HieraConfigPath, - "manifest_file": &p.config.ManifestFile, - "manifest_dir": &p.config.ManifestDir, - "staging_dir": &p.config.StagingDir, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "module_paths": p.config.ModulePaths, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = p.config.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - - validates := map[string]*string{ - "execute_command": &p.config.ExecuteCommand, - } - - for n, ptr := range validates { - if err := p.config.tpl.Validate(*ptr); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing %s: %s", n, err)) - } - } - - newFacts := make(map[string]string) - for k, v := range p.config.Facter { - k, err := p.config.tpl.Process(k, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing facter key %s: %s", k, err)) - continue - } - - v, err := p.config.tpl.Process(v, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing facter value '%s': %s", v, err)) - continue - } - - newFacts[k] = v - } - - p.config.Facter = newFacts - // Validation + var errs *packer.MultiError if p.config.HieraConfigPath != "" { info, err := os.Stat(p.config.HieraConfigPath) if err != nil { @@ -255,14 +193,15 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } // Execute Puppet - command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteTemplate{ + p.config.ctx.Data = &ExecuteTemplate{ FacterVars: strings.Join(facterVars, " "), HieraConfigPath: remoteHieraConfigPath, ManifestDir: remoteManifestDir, ManifestFile: remoteManifestFile, ModulePath: strings.Join(modulePaths, ":"), Sudo: !p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return err } diff --git a/provisioner/puppet-server/provisioner.go b/provisioner/puppet-server/provisioner.go index de21e0105..1188f8978 100644 --- a/provisioner/puppet-server/provisioner.go +++ b/provisioner/puppet-server/provisioner.go @@ -4,15 +4,18 @@ package puppetserver import ( "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { common.PackerConfig `mapstructure:",squash"` - tpl *packer.ConfigTemplate + ctx interpolate.Context // Additional facts to set when executing Puppet Facter map[string]string @@ -55,62 +58,21 @@ type 
ExecuteTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - if p.config.StagingDir == "" { p.config.StagingDir = "/tmp/packer-puppet-server" } - // Templates - templates := map[string]*string{ - "client_cert_dir": &p.config.ClientCertPath, - "client_private_key_dir": &p.config.ClientPrivateKeyPath, - "puppet_server": &p.config.PuppetServer, - "puppet_node": &p.config.PuppetNode, - "options": &p.config.Options, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - newFacts := make(map[string]string) - for k, v := range p.config.Facter { - k, err := p.config.tpl.Process(k, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing facter key %s: %s", k, err)) - continue - } - - v, err := p.config.tpl.Process(v, nil) - if err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Error processing facter value '%s': %s", v, err)) - continue - } - - newFacts[k] = v - } - p.config.Facter = newFacts - + var errs *packer.MultiError if p.config.ClientCertPath != "" { info, err := os.Stat(p.config.ClientCertPath) if err != nil { @@ -178,7 +140,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } // Execute Puppet - command, err := p.config.tpl.Process(p.commandTemplate(), &ExecuteTemplate{ + p.config.ctx.Data = &ExecuteTemplate{ FacterVars: strings.Join(facterVars, " "), ClientCertPath: remoteClientCertPath, ClientPrivateKeyPath: remoteClientPrivateKeyPath, @@ -186,7 +148,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { PuppetServer: p.config.PuppetServer, Options: p.config.Options, Sudo: !p.config.PreventSudo, - }) + } + command, err := interpolate.Render(p.commandTemplate(), &p.config.ctx) if err != nil { return err } diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index 1e2877fe3..9c9ef8b4c 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -5,10 +5,13 @@ package saltmasterless import ( "errors" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "os" "path/filepath" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const DefaultTempConfigDir = "/tmp/salt" @@ -32,7 +35,7 @@ type Config struct { // Where files will be copied before moving to the /srv/salt directory TempConfigDir string `mapstructure:"temp_config_dir"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Provisioner struct { @@ -40,40 +43,21 @@ type Provisioner struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) 
+ err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - if p.config.TempConfigDir == "" { p.config.TempConfigDir = DefaultTempConfigDir } - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - - templates := map[string]*string{ - "bootstrap_args": &p.config.BootstrapArgs, - "minion_config": &p.config.MinionConfig, - "local_state_tree": &p.config.LocalStateTree, - "local_pillar_roots": &p.config.LocalPillarRoots, - "temp_config_dir": &p.config.TempConfigDir, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } + var errs *packer.MultiError // require a salt state tree if p.config.LocalStateTree == "" { diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index ec22acf63..48904710d 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -6,19 +6,22 @@ import ( "bufio" "errors" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "io" "io/ioutil" "log" "os" "strings" "time" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const DefaultRemotePath = "/tmp/script.sh" -type config struct { +type Config struct { common.PackerConfig `mapstructure:",squash"` // If true, the script contains binary and line endings will not be @@ -57,11 +60,11 @@ type config struct { RawStartRetryTimeout string `mapstructure:"start_retry_timeout"` startRetryTimeout time.Duration - tpl *packer.ConfigTemplate + ctx interpolate.Context } type Provisioner struct { - config config + config Config } type ExecuteCommandTemplate struct { @@ -70,20 +73,18 @@ type ExecuteCommandTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - md, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) 
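	// The excluded execute_command is rendered at provision time; the
	// Provision hunk later in this patch fills ExecuteCommandTemplate in
	// just this way (the values here are made-up examples):
	//
	//	p.config.ctx.Data = &ExecuteCommandTemplate{
	//		Vars: "FOO=bar",
	//		Path: "/tmp/script.sh",
	//	}
	//	command, err := interpolate.Render(
	//		"chmod +x {{.Path}}; {{.Vars}} {{.Path}}", &p.config.ctx)
	//	// command == "chmod +x /tmp/script.sh; FOO=bar /tmp/script.sh"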
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - // Accumulate any errors - errs := common.CheckUnusedConfig(md) - if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "chmod +x {{.Path}}; {{.Vars}} {{.Path}}" } @@ -112,6 +113,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.Vars = make([]string, 0) } + var errs *packer.MultiError if p.config.Script != "" && len(p.config.Scripts) > 0 { errs = packer.MultiErrorAppend(errs, errors.New("Only one of script or scripts can be specified.")) @@ -121,39 +123,6 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.Scripts = []string{p.config.Script} } - templates := map[string]*string{ - "inline_shebang": &p.config.InlineShebang, - "script": &p.config.Script, - "start_retry_timeout": &p.config.RawStartRetryTimeout, - "remote_path": &p.config.RemotePath, - } - - for n, ptr := range templates { - var err error - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", n, err)) - } - } - - sliceTemplates := map[string][]string{ - "inline": p.config.Inline, - "scripts": p.config.Scripts, - "environment_vars": p.config.Vars, - } - - for n, slice := range sliceTemplates { - for i, elem := range slice { - var err error - slice[i], err = p.config.tpl.Process(elem, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) - } - } - } - if len(p.config.Scripts) == 0 && p.config.Inline == nil { errs = packer.MultiErrorAppend(errs, errors.New("Either a script file or inline script must be specified.")) @@ -248,10 +217,11 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { flattendVars := strings.Join(envVars, " ") // Compile the command - command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteCommandTemplate{ + p.config.ctx.Data = &ExecuteCommandTemplate{ Vars: flattendVars, Path: p.config.RemotePath, - }) + } + command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return fmt.Errorf("Error processing command: %s", err) } From dc1e67b6d27d233313d62b659f43778d0d7581e4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:56:22 -0700 Subject: [PATCH 146/956] post-processor/*: interpolation --- post-processor/atlas/post-processor.go | 36 +++++------------ post-processor/compress/post-processor.go | 39 +++++------------- .../vagrant-cloud/post-processor.go | 40 +++++++++---------- post-processor/vsphere/post-processor.go | 36 ++++++----------- 4 files changed, 50 insertions(+), 101 deletions(-) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index 26ecf5318..8ac16d273 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -10,7 +10,9 @@ import ( "github.com/hashicorp/atlas-go/v1" "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const BuildEnvKey = "ATLAS_BUILD_ID" @@ -39,7 +41,7 @@ type Config struct { // This shouldn't ever be set outside of unit tests. 
Test bool `mapstructure:"test"` - tpl *packer.ConfigTemplate + ctx interpolate.Context user, name string buildId int } @@ -50,38 +52,22 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - - templates := map[string]*string{ - "artifact": &p.config.Artifact, - "type": &p.config.Type, - "server_address": &p.config.ServerAddr, - "token": &p.config.Token, - } - - errs := new(packer.MultiError) - for key, ptr := range templates { - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - required := map[string]*string{ "artifact": &p.config.Artifact, "artifact_type": &p.config.Type, } + var errs *packer.MultiError for key, ptr := range required { if *ptr == "" { errs = packer.MultiErrorAppend( @@ -89,7 +75,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } } - if len(errs.Errors) > 0 { + if errs != nil && len(errs.Errors) > 0 { return errs } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 003afd2a9..ccf300946 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -8,7 +8,9 @@ import ( "os" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type Config struct { @@ -16,7 +18,7 @@ type Config struct { OutputPath string `mapstructure:"output"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -24,39 +26,16 @@ type PostProcessor struct { } func (self *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&self.config, raws...) + err := config.Decode(&self.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - self.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - self.config.tpl.UserVars = self.config.PackerUserVars - - templates := map[string]*string{ - "output": &self.config.OutputPath, - } - - errs := new(packer.MultiError) - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = self.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs - } - return nil } diff --git a/post-processor/vagrant-cloud/post-processor.go b/post-processor/vagrant-cloud/post-processor.go index 516bea5d9..ace2a5c71 100644 --- a/post-processor/vagrant-cloud/post-processor.go +++ b/post-processor/vagrant-cloud/post-processor.go @@ -6,11 +6,14 @@ package vagrantcloud import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "log" "strings" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) const VAGRANT_CLOUD_URL = "https://vagrantcloud.com/api/v1" @@ -28,7 +31,7 @@ type Config struct { BoxDownloadUrl string `mapstructure:"box_download_url"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type boxDownloadUrlTemplate struct { @@ -43,17 +46,18 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "box_download_url", + }, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - // Default configuration if p.config.VagrantCloudUrl == "" { p.config.VagrantCloudUrl = VAGRANT_CLOUD_URL @@ -76,15 +80,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } } - // Template process - for key, ptr := range templates { - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - if len(errs.Errors) > 0 { return errs } @@ -111,10 +106,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // The name of the provider for vagrant cloud, and vagrant providerName := providerFromBuilderName(artifact.Id()) - boxDownloadUrl, err := p.config.tpl.Process(p.config.BoxDownloadUrl, &boxDownloadUrlTemplate{ + p.config.ctx.Data = &boxDownloadUrlTemplate{ ArtifactId: artifact.Id(), Provider: providerName, - }) + } + boxDownloadUrl, err := interpolate.Render(p.config.BoxDownloadUrl, &p.config.ctx) if err != nil { return nil, false, fmt.Errorf("Error processing box_download_url: %s", err) } diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index 38bff43aa..f9a6c37c3 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -3,12 +3,15 @@ package vsphere import ( "bytes" "fmt" - "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/packer" "log" "net/url" "os/exec" "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) var builtins = map[string]string{ @@ -31,7 +34,7 @@ type Config struct { VMName string `mapstructure:"vm_name"` VMNetwork string `mapstructure:"vm_network"` - tpl *packer.ConfigTemplate + ctx interpolate.Context } type PostProcessor struct { @@ -39,17 +42,16 @@ type PostProcessor struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - _, err := common.DecodeConfig(&p.config, raws...) + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
if err != nil { return err } - p.config.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.config.tpl.UserVars = p.config.PackerUserVars - // Defaults if p.config.DiskMode == "" { p.config.DiskMode = "thick" @@ -81,20 +83,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } } - // Then define the ones that are optional - templates["datastore"] = &p.config.Datastore - templates["vm_network"] = &p.config.VMNetwork - templates["vm_folder"] = &p.config.VMFolder - - // Template process - for key, ptr := range templates { - *ptr, err = p.config.tpl.Process(*ptr, nil) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - if len(errs.Errors) > 0 { return errs } From 1a15371c7ac8d8498ddb0ac5ec71c00a7b1fc6f6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:57:57 -0700 Subject: [PATCH 147/956] post-processor/docker-tag: remove decodeconfig usage --- post-processor/docker-tag/post-processor_test.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/post-processor/docker-tag/post-processor_test.go b/post-processor/docker-tag/post-processor_test.go index 925419a10..c28e7cac0 100644 --- a/post-processor/docker-tag/post-processor_test.go +++ b/post-processor/docker-tag/post-processor_test.go @@ -2,11 +2,11 @@ package dockertag import ( "bytes" + "testing" + "github.com/mitchellh/packer/builder/docker" - "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/post-processor/docker-import" - "testing" ) func testConfig() map[string]interface{} { @@ -39,9 +39,8 @@ func TestPostProcessor_ImplementsPostProcessor(t *testing.T) { func TestPostProcessor_PostProcess(t *testing.T) { driver := &docker.MockDriver{} p := &PostProcessor{Driver: driver} - _, err := common.DecodeConfig(&p.config, testConfig()) - if err != nil { - t.Fatalf("err %s", err) + if err := p.Configure(testConfig()); err != nil { + t.Fatalf("err: %s", err) } artifact := &packer.MockArtifact{ From adb6b43dd8298cb5d69226db0b66bed0745d9cf4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 14:58:09 -0700 Subject: [PATCH 148/956] common: remove unused config methods --- common/config.go | 121 ----------------------------------- common/config_test.go | 142 ------------------------------------------ 2 files changed, 263 deletions(-) diff --git a/common/config.go b/common/config.go index 72b3bdd27..8b1761ceb 100644 --- a/common/config.go +++ b/common/config.go @@ -2,14 +2,10 @@ package common import ( "fmt" - "github.com/mitchellh/mapstructure" - "github.com/mitchellh/packer/packer" "net/url" "os" "path/filepath" - "reflect" "runtime" - "sort" "strings" ) @@ -23,28 +19,6 @@ func ScrubConfig(target interface{}, values ...string) string { return conf } -// CheckUnusedConfig is a helper that makes sure that the there are no -// unused configuration keys, properly ignoring keys that don't matter. -func CheckUnusedConfig(md *mapstructure.Metadata) *packer.MultiError { - errs := make([]error, 0) - - if md.Unused != nil && len(md.Unused) > 0 { - sort.Strings(md.Unused) - for _, unused := range md.Unused { - if unused != "type" && !strings.HasPrefix(unused, "packer_") { - errs = append( - errs, fmt.Errorf("Unknown configuration key: %q", unused)) - } - } - } - - if len(errs) > 0 { - return &packer.MultiError{errs} - } - - return nil -} - // ChooseString returns the first non-empty value. 
func ChooseString(vals ...string) string { for _, el := range vals { @@ -56,42 +30,6 @@ func ChooseString(vals ...string) string { return "" } -// DecodeConfig is a helper that handles decoding raw configuration using -// mapstructure. It returns the metadata and any errors that may happen. -// If you need extra configuration for mapstructure, you should configure -// it manually and not use this helper function. -func DecodeConfig(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { - decodeHook, err := decodeConfigHook(raws) - if err != nil { - return nil, err - } - - var md mapstructure.Metadata - decoderConfig := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - decodeHook, - mapstructure.StringToSliceHookFunc(","), - ), - Metadata: &md, - Result: target, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return nil, err - } - - for _, raw := range raws { - err := decoder.Decode(raw) - if err != nil { - return nil, err - } - } - - return &md, nil -} - // DownloadableURL processes a URL that may also be a file path and returns // a completely valid URL. For example, the original URL might be "local/file.iso" // which isn't a valid URL. DownloadableURL will return "file:///local/file.iso" @@ -182,62 +120,3 @@ func DownloadableURL(original string) (string, error) { return url.String(), nil } - -// This returns a mapstructure.DecodeHookFunc that automatically template -// processes any configuration values that aren't strings but have been -// provided as strings. -// -// For example: "image_id" wants an int and the user uses a string with -// a user variable like "{{user `image_id`}}". This decode hook makes that -// work. -func decodeConfigHook(raws []interface{}) (mapstructure.DecodeHookFunc, error) { - // First thing we do is decode PackerConfig so that we can have access - // to the user variables so that we can process some templates. - var pc PackerConfig - - decoderConfig := &mapstructure.DecoderConfig{ - Result: &pc, - WeaklyTypedInput: true, - } - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return nil, err - } - for _, raw := range raws { - if err := decoder.Decode(raw); err != nil { - return nil, err - } - } - - tpl, err := packer.NewConfigTemplate() - if err != nil { - return nil, err - } - tpl.UserVars = pc.PackerUserVars - - return func(f reflect.Kind, t reflect.Kind, v interface{}) (interface{}, error) { - if t != reflect.String { - // We need to convert []uint8 to string. 
We have to do this - // because internally Packer uses MsgPack for RPC and the MsgPack - // codec turns strings into []uint8 - if f == reflect.Slice { - dataVal := reflect.ValueOf(v) - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - v = string(dataVal.Interface().([]uint8)) - } - } - - if sv, ok := v.(string); ok { - var err error - v, err = tpl.Process(sv, nil) - if err != nil { - return nil, err - } - } - } - - return v, nil - }, nil -} diff --git a/common/config_test.go b/common/config_test.go index a93feb729..92a7316a3 100644 --- a/common/config_test.go +++ b/common/config_test.go @@ -2,33 +2,14 @@ package common import ( "fmt" - "github.com/mitchellh/mapstructure" "io/ioutil" "os" "path/filepath" - "reflect" "runtime" "strings" "testing" ) -func TestCheckUnusedConfig(t *testing.T) { - md := &mapstructure.Metadata{ - Unused: make([]string, 0), - } - - err := CheckUnusedConfig(md) - if err != nil { - t.Fatalf("err: %s", err) - } - - md.Unused = []string{"foo", "bar"} - err = CheckUnusedConfig(md) - if err == nil { - t.Fatal("should have error") - } -} - func TestChooseString(t *testing.T) { cases := []struct { Input []string @@ -56,129 +37,6 @@ func TestChooseString(t *testing.T) { } } -func TestDecodeConfig(t *testing.T) { - type Local struct { - Foo string - Bar string - } - - raws := []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "bar": "baz", - "baz": "what", - }, - } - - var result Local - md, err := DecodeConfig(&result, raws...) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result.Foo != "bar" { - t.Fatalf("invalid: %#v", result.Foo) - } - - if result.Bar != "baz" { - t.Fatalf("invalid: %#v", result.Bar) - } - - if md == nil { - t.Fatal("metadata should not be nil") - } - - if !reflect.DeepEqual(md.Unused, []string{"baz"}) { - t.Fatalf("unused: %#v", md.Unused) - } -} - -// This test tests the case that a user var is used for an integer -// configuration. -func TestDecodeConfig_stringToSlice(t *testing.T) { - type Local struct { - Val []string - EmptyVal []string - } - - raw := map[string]interface{}{ - "packer_user_variables": map[string]string{ - "foo": "bar", - }, - - "val": "foo,{{user `foo`}}", - "emptyval": "", - } - - var result Local - _, err := DecodeConfig(&result, raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := []string{"foo", "bar"} - if !reflect.DeepEqual(result.Val, expected) { - t.Fatalf("invalid: %#v", result.Val) - } - if len(result.EmptyVal) > 0 { - t.Fatalf("invalid: %#v", result.EmptyVal) - } -} - -// This test tests the case that a user var is used for an integer -// configuration. -func TestDecodeConfig_userVarConversion(t *testing.T) { - type Local struct { - Val int - } - - raw := map[string]interface{}{ - "packer_user_variables": map[string]string{ - "foo": "42", - }, - - "val": "{{user `foo`}}", - } - - var result Local - _, err := DecodeConfig(&result, raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result.Val != 42 { - t.Fatalf("invalid: %#v", result.Val) - } -} - -// This tests the way MessagePack decodes strings (into []uint8) and -// that we can still decode into the proper types. 
-func TestDecodeConfig_userVarConversionUInt8(t *testing.T) { - type Local struct { - Val int - } - - raw := map[string]interface{}{ - "packer_user_variables": map[string]string{ - "foo": "42", - }, - - "val": []uint8("{{user `foo`}}"), - } - - var result Local - _, err := DecodeConfig(&result, raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result.Val != 42 { - t.Fatalf("invalid: %#v", result.Val) - } -} - func TestDownloadableURL(t *testing.T) { // Invalid URL: has hex code in host _, err := DownloadableURL("http://what%20.com") From 1d3a4d6aa2b2c969bb52f00e27113d21a28ad793 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 17:56:45 -0700 Subject: [PATCH 149/956] packer: remove ConfigTemplate --- builder/digitalocean/builder.go | 1 - packer/config_template.go | 139 --------------------- packer/config_template_test.go | 214 -------------------------------- 3 files changed, 354 deletions(-) delete mode 100644 packer/config_template.go delete mode 100644 packer/config_template_test.go diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index 57e6018a1..3292bea10 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -66,7 +66,6 @@ type Config struct { stateTimeout time.Duration ctx *interpolate.Context - tpl *packer.ConfigTemplate } type Builder struct { diff --git a/packer/config_template.go b/packer/config_template.go deleted file mode 100644 index 47b096227..000000000 --- a/packer/config_template.go +++ /dev/null @@ -1,139 +0,0 @@ -package packer - -import ( - "bytes" - "fmt" - "os" - "strconv" - "strings" - "text/template" - "time" - - "github.com/mitchellh/packer/common/uuid" -) - -// InitTime is the UTC time when this package was initialized. It is -// used as the timestamp for all configuration templates so that they -// match for a single build. -var InitTime time.Time - -func init() { - InitTime = time.Now().UTC() -} - -// ConfigTemplate processes string data as a text/template with some common -// elements and functions available. Plugin creators should process as -// many fields as possible through this. -type ConfigTemplate struct { - UserVars map[string]string - - root *template.Template - i int -} - -// NewConfigTemplate creates a new configuration template processor. -func NewConfigTemplate() (*ConfigTemplate, error) { - result := &ConfigTemplate{ - UserVars: make(map[string]string), - } - - result.root = template.New("configTemplateRoot") - result.root.Funcs(template.FuncMap{ - "env": templateDisableEnv, - "pwd": templatePwd, - "isotime": templateISOTime, - "timestamp": templateTimestamp, - "user": result.templateUser, - "uuid": templateUuid, - "upper": strings.ToUpper, - "lower": strings.ToLower, - }) - - return result, nil -} - -// Process processes a single string, compiling and executing the template. -func (t *ConfigTemplate) Process(s string, data interface{}) (string, error) { - tpl, err := t.root.New(t.nextTemplateName()).Parse(s) - if err != nil { - return "", err - } - - buf := new(bytes.Buffer) - if err := tpl.Execute(buf, data); err != nil { - return "", err - } - - return buf.String(), nil -} - -// Validate the template. 
-func (t *ConfigTemplate) Validate(s string) error { - root, err := t.root.Clone() - if err != nil { - return err - } - - _, err = root.New("template").Parse(s) - return err -} - -// Add additional functions to the template -func (t *ConfigTemplate) Funcs(funcs template.FuncMap) { - t.root.Funcs(funcs) -} - -func (t *ConfigTemplate) nextTemplateName() string { - name := fmt.Sprintf("tpl%d", t.i) - t.i++ - return name -} - -// User is the function exposed as "user" within the templates and -// looks up user variables. -func (t *ConfigTemplate) templateUser(n string) (string, error) { - result, ok := t.UserVars[n] - if !ok { - return "", fmt.Errorf("unknown user var: %s", n) - } - - return result, nil -} - -func templateDisableEnv(n string) (string, error) { - return "", fmt.Errorf( - "Environmental variables can only be used as default values for user variables.") -} - -func templateDisableUser(n string) (string, error) { - return "", fmt.Errorf( - "User variable can't be used within a default value for a user variable: %s", n) -} - -func templateEnv(n string) string { - return os.Getenv(n) -} - -func templateISOTime(timeFormat ...string) (string, error) { - if len(timeFormat) == 0 { - return time.Now().UTC().Format(time.RFC3339), nil - } - - if len(timeFormat) > 1 { - return "", fmt.Errorf("too many values, 1 needed: %v", timeFormat) - } - - return time.Now().UTC().Format(timeFormat[0]), nil -} - -func templatePwd() (string, error) { - return os.Getwd() -} - -func templateTimestamp() string { - return strconv.FormatInt(InitTime.Unix(), 10) -} - -func templateUuid() string { - return uuid.TimeOrderedUUID() -} diff --git a/packer/config_template_test.go b/packer/config_template_test.go deleted file mode 100644 index cdc5fd461..000000000 --- a/packer/config_template_test.go +++ /dev/null @@ -1,214 +0,0 @@ -package packer - -import ( - "fmt" - "math" - "os" - "strconv" - "testing" - "time" -) - -func TestConfigTemplateProcess_env(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - _, err = tpl.Process(`{{env "foo"}}`, nil) - if err == nil { - t.Fatal("should error") - } -} - -func TestConfigTemplateProcess_isotime(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := tpl.Process(`{{isotime}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - val, err := time.Parse(time.RFC3339, result) - if err != nil { - t.Fatalf("err: %s", err) - } - - currentTime := time.Now().UTC() - if currentTime.Sub(val) > 2*time.Second { - t.Fatalf("val: %d (current: %d)", val, currentTime) - } -} - -// Note must format with the magic Date: Mon Jan 2 15:04:05 -0700 MST 2006 -func TestConfigTemplateProcess_isotime_withFormat(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Checking for a too-many arguments error - // Because of the variadic function, compile time checking won't work - _, err = tpl.Process(`{{isotime "20060102" "huh"}}`, nil) - if err == nil { - t.Fatalf("err: cannot have more than 1 input") - } - - result, err := tpl.Process(`{{isotime "20060102"}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - ti := time.Now().UTC() - val := fmt.Sprintf("%04d%02d%02d", ti.Year(), ti.Month(), ti.Day()) - - if result != val { - t.Fatalf("val: %s (formated: %s)", val, result) - } -} - -func TestConfigTemplateProcess_pwd(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - pwd, err 
:= os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := tpl.Process(`{{pwd}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result != pwd { - t.Fatalf("err: %s", result) - } -} - -func TestConfigTemplateProcess_timestamp(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := tpl.Process(`{{timestamp}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - val, err := strconv.ParseInt(result, 10, 64) - if err != nil { - t.Fatalf("err: %s", err) - } - - currentTime := time.Now().UTC().Unix() - if math.Abs(float64(currentTime-val)) > 10 { - t.Fatalf("val: %d (current: %d)", val, currentTime) - } - - time.Sleep(2 * time.Second) - - result2, err := tpl.Process(`{{timestamp}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result != result2 { - t.Fatalf("bad: %#v %#v", result, result2) - } -} - -func TestConfigTemplateProcess_user(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - tpl.UserVars["foo"] = "bar" - - result, err := tpl.Process(`{{user "foo"}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result != "bar" { - t.Fatalf("bad: %s", result) - } -} - -func TestConfigTemplateProcess_uuid(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := tpl.Process(`{{uuid}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(result) != 36 { - t.Fatalf("err: %s", result) - } -} - -func TestConfigTemplateProcess_upper(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - tpl.UserVars["foo"] = "bar" - - result, err := tpl.Process(`{{user "foo" | upper}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result != "BAR" { - t.Fatalf("bad: %s", result) - } -} - -func TestConfigTemplateProcess_lower(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - tpl.UserVars["foo"] = "BAR" - - result, err := tpl.Process(`{{user "foo" | lower}}`, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result != "bar" { - t.Fatalf("bad: %s", result) - } -} - -func TestConfigTemplateValidate(t *testing.T) { - tpl, err := NewConfigTemplate() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Valid - err = tpl.Validate(`{{user "foo"}}`) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Invalid - err = tpl.Validate(`{{idontexist}}`) - if err == nil { - t.Fatal("should have error") - } -} From 1ee2b014a6983bd381cf8de9d97ce9d1de9e36b6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 27 May 2015 20:09:52 -0700 Subject: [PATCH 150/956] packer: remove Ui/Cache from CoreConfig --- command/build.go | 2 +- command/meta.go | 1 + commands.go | 27 +++++++++------------------ config.go | 3 --- main.go | 29 ++++++++++++++++++++++------- packer/core.go | 15 --------------- packer/testing.go | 3 --- 7 files changed, 33 insertions(+), 47 deletions(-) diff --git a/command/build.go b/command/build.go index ec6f70555..fbadd6dab 100644 --- a/command/build.go +++ b/command/build.go @@ -149,7 +149,7 @@ func (c BuildCommand) Run(args []string) int { name := b.Name() log.Printf("Starting build run: %s", name) ui := buildUis[name] - runArtifacts, err := b.Run(ui, c.CoreConfig.Cache) + runArtifacts, err := b.Run(ui, c.Cache) if err != nil { ui.Error(fmt.Sprintf("Build '%s' errored: %s", name, err)) diff --git a/command/meta.go b/command/meta.go 
index 0dc721bd1..250f94f6b 100644 --- a/command/meta.go +++ b/command/meta.go @@ -26,6 +26,7 @@ const ( // Packer command inherits. type Meta struct { CoreConfig *packer.CoreConfig + Cache packer.Cache Ui packer.Ui // These are set by command-line flags diff --git a/commands.go b/commands.go index e0f313957..510250721 100644 --- a/commands.go +++ b/commands.go @@ -6,62 +6,53 @@ import ( "github.com/mitchellh/cli" "github.com/mitchellh/packer/command" - "github.com/mitchellh/packer/packer" ) // Commands is the mapping of all the available Terraform commands. var Commands map[string]cli.CommandFactory -// Ui is the cli.Ui used for communicating to the outside world. -var Ui cli.Ui +// CommandMeta is the Meta to use for the commands. This must be written +// before the CLI is started. +var CommandMeta *command.Meta const ErrorPrefix = "e:" const OutputPrefix = "o:" func init() { - meta := command.Meta{ - CoreConfig: &CoreConfig, - Ui: &packer.BasicUi{ - Reader: os.Stdin, - Writer: os.Stdout, - ErrorWriter: os.Stdout, - }, - } - Commands = map[string]cli.CommandFactory{ "build": func() (cli.Command, error) { return &command.BuildCommand{ - Meta: meta, + Meta: *CommandMeta, }, nil }, "fix": func() (cli.Command, error) { return &command.FixCommand{ - Meta: meta, + Meta: *CommandMeta, }, nil }, "inspect": func() (cli.Command, error) { return &command.InspectCommand{ - Meta: meta, + Meta: *CommandMeta, }, nil }, "push": func() (cli.Command, error) { return &command.PushCommand{ - Meta: meta, + Meta: *CommandMeta, }, nil }, "validate": func() (cli.Command, error) { return &command.ValidateCommand{ - Meta: meta, + Meta: *CommandMeta, }, nil }, "version": func() (cli.Command, error) { return &command.VersionCommand{ - Meta: meta, + Meta: *CommandMeta, Revision: GitCommit, Version: Version, VersionPrerelease: VersionPrerelease, diff --git a/config.go b/config.go index a9c07043f..745922b1e 100644 --- a/config.go +++ b/config.go @@ -13,9 +13,6 @@ import ( "github.com/mitchellh/packer/packer/plugin" ) -// CoreConfig is the global CoreConfig we use to initialize the CLI. 
-var CoreConfig packer.CoreConfig
-
 type config struct {
 	DisableCheckpoint          bool `json:"disable_checkpoint"`
 	DisableCheckpointSignature bool `json:"disable_checkpoint_signature"`
diff --git a/main.go b/main.go
index 73d4b88cf..d00336283 100644
--- a/main.go
+++ b/main.go
@@ -12,6 +12,7 @@ import (
 	"sync"
 
 	"github.com/mitchellh/cli"
+	"github.com/mitchellh/packer/command"
 	"github.com/mitchellh/packer/packer"
 	"github.com/mitchellh/packer/packer/plugin"
 	"github.com/mitchellh/panicwrap"
@@ -139,14 +140,14 @@ func wrappedMain() int {
 
 	defer plugin.CleanupClients()
 
-	// Create the environment configuration
-	CoreConfig.Cache = cache
-	CoreConfig.Components.Builder = config.LoadBuilder
-	CoreConfig.Components.Hook = config.LoadHook
-	CoreConfig.Components.PostProcessor = config.LoadPostProcessor
-	CoreConfig.Components.Provisioner = config.LoadProvisioner
+	// Setup the UI if we're being machine-readable
+	var ui packer.Ui = &packer.BasicUi{
+		Reader:      os.Stdin,
+		Writer:      os.Stdout,
+		ErrorWriter: os.Stdout,
+	}
 	if machineReadable {
-		CoreConfig.Ui = &packer.MachineReadableUi{
+		ui = &packer.MachineReadableUi{
 			Writer: os.Stdout,
 		}
 
@@ -158,6 +159,20 @@ func wrappedMain() int {
 		}
 	}
 
+	// Create the CLI meta
+	CommandMeta = &command.Meta{
+		CoreConfig: &packer.CoreConfig{
+			Components: packer.ComponentFinder{
+				Builder:       config.LoadBuilder,
+				Hook:          config.LoadHook,
+				PostProcessor: config.LoadPostProcessor,
+				Provisioner:   config.LoadProvisioner,
+			},
+		},
+		Cache: cache,
+		Ui:    ui,
+	}
+
 	//setupSignalHandlers(env)
 
 	cli := &cli.CLI{
diff --git a/packer/core.go b/packer/core.go
index 3969da9c9..e8329b821 100644
--- a/packer/core.go
+++ b/packer/core.go
@@ -2,7 +2,6 @@ package packer
 
 import (
 	"fmt"
-	"os"
 	"sort"
 
 	"github.com/hashicorp/go-multierror"
@@ -13,9 +12,7 @@ import (
 // Core is the main executor of Packer. If Packer is being used as a
 // library, this is the struct you'll want to instantiate to get anything done.
 type Core struct {
-	cache      Cache
 	components ComponentFinder
-	ui         Ui
 	template   *template.Template
 	variables  map[string]string
 	builds     map[string]*template.Builder
@@ -24,9 +21,7 @@ type Core struct {
 // CoreConfig is the structure for initializing a new Core. Once a CoreConfig
 // is used to initialize a Core, it shouldn't be re-used or modified again.
 type CoreConfig struct {
-	Cache      Cache
 	Components ComponentFinder
-	Ui         Ui
 	Template   *template.Template
 	Variables  map[string]string
 }
@@ -55,14 +50,6 @@ type ComponentFinder struct {
 
 // NewCore creates a new Core.
 func NewCore(c *CoreConfig) (*Core, error) {
-	if c.Ui == nil {
-		c.Ui = &BasicUi{
-			Reader:      os.Stdin,
-			Writer:      os.Stdout,
-			ErrorWriter: os.Stdout,
-		}
-	}
-
 	// Go through and interpolate all the build names. We should be able
 	// to do this at this point with the variables. 
builds := make(map[string]*template.Builder)
@@ -80,9 +67,7 @@ func NewCore(c *CoreConfig) (*Core, error) {
 	}
 
 	return &Core{
-		cache:      c.Cache,
 		components: c.Components,
-		ui:         c.Ui,
 		template:   c.Template,
 		variables:  c.Variables,
 		builds:     builds,
diff --git a/packer/testing.go b/packer/testing.go
index 7e7ad0b53..7217a5083 100644
--- a/packer/testing.go
+++ b/packer/testing.go
@@ -3,7 +3,6 @@ package packer
 
 import (
 	"bytes"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -20,9 +19,7 @@ func TestCoreConfig(t *testing.T) *CoreConfig {
 	}
 
 	return &CoreConfig{
-		Cache:      &FileCache{CacheDir: os.TempDir()},
 		Components: components,
-		Ui:         TestUi(t),
 	}
 }
 
From c78c4e78ada82777a3fb8338a87da23b72a1d273 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Wed, 27 May 2015 20:15:07 -0700
Subject: [PATCH 151/956] amazon/common: sleep 3 seconds if instance doesn't exist

---
 builder/amazon/common/step_run_source_instance.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go
index 545e7765d..76baf9bde 100644
--- a/builder/amazon/common/step_run_source_instance.go
+++ b/builder/amazon/common/step_run_source_instance.go
@@ -195,13 +195,14 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		instanceId = spotResp.SpotRequestResults[0].InstanceId
 	}
 
-	var instanceResp, instanceErr = ec2conn.Instances([]string{instanceId}, nil)
+	instanceResp, instanceErr := ec2conn.Instances([]string{instanceId}, nil)
 	for i := 0; i < 10; i++ {
 		if instanceErr == nil {
 			err = instanceErr
 			break
 		}
-		time.Sleep(time.Duration(3))
+
+		time.Sleep(3 * time.Second)
 		instanceResp, err = ec2conn.Instances([]string{instanceId}, nil)
 	}
 
From 4413e60b598d11ae347b824fbd463b7d5be317fe Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Wed, 27 May 2015 20:15:44 -0700
Subject: [PATCH 152/956] update CHANGELOG

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 44d929beb..ba59baa34 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,8 @@ IMPROVEMENTS:
 
 BUG FIXES:
 
 * builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
+ * builder/amazon: Retry finding created instance for eventual
+ consistency. 
[GH-2129] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/docker: Fixed hang on prompt while copying script + * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support * postprocessor/vagrant-cloud: Fixed failing on response * provisioner/puppet-masterless: Allow manifest_file to be a directory From b1497b951c1c3a5878e65c26d9d9f924e0a926f3 Mon Sep 17 00:00:00 2001 From: jszwedko Date: Fri, 3 Apr 2015 22:30:13 -0700 Subject: [PATCH 154/956] code.google.com/p/go.crypto/ssh -> golang.org/x/crypto/ssh code.google.com/p/go.crypto/ssh is now at golang.org/x/crypto/ssh as of https://code.google.com/p/go/source/detail?spec=svn.crypto.69e2a90ed92d03812364aeb947b7068dc42e561e&repo=crypto&r=8fec09c61d5d66f460d227fd1df3473d7e015bc6 Using the code.google.com import redirects properly, but runs into issues if you try to use a subpackage of `ssh`, e.g. `agent` which refers to golang.org/x/crypto/ssh causing conflicts if your types expect code.google.com/p/go.crypto/ssh. This is a precursor to a PR for #1066. --- builder/amazon/common/ssh.go | 2 +- builder/digitalocean/ssh.go | 2 +- builder/googlecompute/ssh.go | 2 +- builder/null/ssh.go | 2 +- builder/openstack/ssh.go | 2 +- builder/parallels/common/ssh.go | 2 +- builder/qemu/ssh.go | 2 +- builder/virtualbox/common/ssh.go | 2 +- builder/vmware/common/ssh.go | 2 +- builder/vmware/iso/driver_esx5.go | 2 +- common/ssh/key.go | 2 +- common/step_connect_ssh.go | 2 +- communicator/ssh/communicator.go | 2 +- communicator/ssh/communicator_test.go | 2 +- communicator/ssh/password.go | 2 +- communicator/ssh/password_test.go | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index f31437d89..38ea6cc40 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -1,7 +1,7 @@ package common import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/goamz/ec2" diff --git a/builder/digitalocean/ssh.go b/builder/digitalocean/ssh.go index c53262ccc..bd0afc3fe 100644 --- a/builder/digitalocean/ssh.go +++ b/builder/digitalocean/ssh.go @@ -1,7 +1,7 @@ package digitalocean import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" ) diff --git a/builder/googlecompute/ssh.go b/builder/googlecompute/ssh.go index b17a5b91d..a4e0151f4 100644 --- a/builder/googlecompute/ssh.go +++ b/builder/googlecompute/ssh.go @@ -1,7 +1,7 @@ package googlecompute import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" ) diff --git a/builder/null/ssh.go b/builder/null/ssh.go index ab9c9e40a..a9c2af330 100644 --- a/builder/null/ssh.go +++ b/builder/null/ssh.go @@ -1,7 +1,7 @@ package null import ( - gossh "code.google.com/p/go.crypto/ssh" + gossh "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/communicator/ssh" diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index cbf2c6b41..16afda64d 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -1,7 +1,7 @@ package openstack import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/multistep" diff --git a/builder/parallels/common/ssh.go b/builder/parallels/common/ssh.go index e914583c1..142b6c99d 100644 --- a/builder/parallels/common/ssh.go +++ b/builder/parallels/common/ssh.go @@ -3,7 +3,7 @@ 
package common import ( "fmt" - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" packerssh "github.com/mitchellh/packer/communicator/ssh" diff --git a/builder/qemu/ssh.go b/builder/qemu/ssh.go index dfd87ee20..deb7ba405 100644 --- a/builder/qemu/ssh.go +++ b/builder/qemu/ssh.go @@ -3,7 +3,7 @@ package qemu import ( "fmt" - gossh "code.google.com/p/go.crypto/ssh" + gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" diff --git a/builder/virtualbox/common/ssh.go b/builder/virtualbox/common/ssh.go index 0cfc05d2c..c07c2ce9c 100644 --- a/builder/virtualbox/common/ssh.go +++ b/builder/virtualbox/common/ssh.go @@ -3,7 +3,7 @@ package common import ( "fmt" - gossh "code.google.com/p/go.crypto/ssh" + gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 5d2ca40e2..bfd0b8bc7 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -7,7 +7,7 @@ import ( "log" "os" - gossh "code.google.com/p/go.crypto/ssh" + gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index d81149511..2b9332760 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -14,10 +14,10 @@ import ( "strings" "time" - gossh "code.google.com/p/go.crypto/ssh" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/communicator/ssh" "github.com/mitchellh/packer/packer" + gossh "golang.org/x/crypto/ssh" ) // ESX5 driver talks to an ESXi5 hypervisor remotely over SSH to build diff --git a/common/ssh/key.go b/common/ssh/key.go index 11a4b0742..0de73b78c 100644 --- a/common/ssh/key.go +++ b/common/ssh/key.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "os" - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" ) // FileSigner returns an ssh.Signer for a key file. 
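Why mixing the two paths broke builds, in code: Go identifies a type by its full import path, so an ssh.Signer from code.google.com/p/go.crypto/ssh and one from golang.org/x/crypto/ssh are unrelated types even though the sources are identical. The agent subpackage only speaks the golang.org types, as in this sketch (illustrative only; it assumes a running SSH agent exposed through the SSH_AUTH_SOCK environment variable):

    package main

    import (
    	"fmt"
    	"net"
    	"os"

    	"golang.org/x/crypto/ssh"
    	"golang.org/x/crypto/ssh/agent"
    )

    func main() {
    	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
    	if err != nil {
    		panic(err)
    	}

    	// agent.NewClient returns an agent.Agent whose Signers method
    	// yields []ssh.Signer from golang.org/x/crypto/ssh. A caller that
    	// still imported the code.google.com path could not pass these
    	// values to its own ssh API: identical code, distinct types.
    	ag := agent.NewClient(sock)
    	auth := ssh.PublicKeysCallback(ag.Signers)
    	fmt.Printf("%T\n", auth)
    }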
diff --git a/common/step_connect_ssh.go b/common/step_connect_ssh.go index b00d5bfc0..30064c7f2 100644 --- a/common/step_connect_ssh.go +++ b/common/step_connect_ssh.go @@ -1,7 +1,7 @@ package common import ( - gossh "code.google.com/p/go.crypto/ssh" + gossh "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/multistep" diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 07fb1eaa2..611622750 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -3,7 +3,7 @@ package ssh import ( "bufio" "bytes" - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/packer/packer" diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go index aa241cca8..26cf76757 100644 --- a/communicator/ssh/communicator_test.go +++ b/communicator/ssh/communicator_test.go @@ -4,7 +4,7 @@ package ssh import ( "bytes" - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/packer/packer" "net" diff --git a/communicator/ssh/password.go b/communicator/ssh/password.go index e5e2a3595..c406975ab 100644 --- a/communicator/ssh/password.go +++ b/communicator/ssh/password.go @@ -1,7 +1,7 @@ package ssh import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "log" ) diff --git a/communicator/ssh/password_test.go b/communicator/ssh/password_test.go index e74b46e06..6e3e0a257 100644 --- a/communicator/ssh/password_test.go +++ b/communicator/ssh/password_test.go @@ -1,7 +1,7 @@ package ssh import ( - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" "reflect" "testing" ) From b9bfae8da3355dd4575c1800df0be80c923b0f60 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 08:19:53 -0700 Subject: [PATCH 155/956] amazon/chroot: style --- builder/amazon/chroot/step_attach_volume.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/builder/amazon/chroot/step_attach_volume.go b/builder/amazon/chroot/step_attach_volume.go index ec3facc10..a7db44b91 100644 --- a/builder/amazon/chroot/step_attach_volume.go +++ b/builder/amazon/chroot/step_attach_volume.go @@ -3,12 +3,13 @@ package chroot import ( "errors" "fmt" + "strings" + "time" + "github.com/mitchellh/goamz/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" - "strings" - "time" ) // StepAttachVolume attaches the previously created volume to an @@ -51,7 +52,7 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "attached", Refresh: func() (interface{}, string, error) { - var attempts = 0 + attempts := 0 for attempts < 30 { resp, err := ec2conn.Volumes([]string{volumeId}, ec2.NewFilter()) if err != nil { @@ -63,11 +64,12 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { } // When Attachment on volume is not present sleep for 2s and retry attempts += 1 - ui.Say( - fmt.Sprintf("Warning volume %s show no attachments, Attempt %d/30, Sleeping for 2s and will retry.", - volumeId, attempts)) - time.Sleep(time.Duration(2) * time.Second) + ui.Say(fmt.Sprintf( + "Volume %s show no attachments. Attempt %d/30. 
Sleeping for 2s and will retry.",
+				volumeId, attempts))
+			time.Sleep(2 * time.Second)
 		}
+
 		// Attachment on volume is not present after all attempts
 		return nil, "", errors.New("No attachments on volume.")
 	},
From 73bdc3fd0f0fe7784ff9900d778c5b69241a1073 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 28 May 2015 08:20:24 -0700
Subject: [PATCH 156/956] update CHANGELOG

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b664452a..cbc5bfcb6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ BUG FIXES:
 * builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
 * builder/amazon: Retry finding created instance for eventual
 consistency. [GH-2129]
+ * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046]
 * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API
 * builder/docker: Fixed hang on prompt while copying script
 * builder/qemu: Add `disk_discard` option [GH-2120]
 * builder/virtualbox: Added SCSI support
From b78b119a11a67736afd5c301eee7fcbc0e96929a Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 28 May 2015 08:31:22 -0700
Subject: [PATCH 157/956] amazon/*: fix merge issues with lib switch

---
 builder/amazon/common/access_config.go | 24 ++++++++++---------
 builder/amazon/common/block_device.go | 2 --
 builder/amazon/common/step_ami_region_copy.go | 18 +++++++-------
 builder/amazon/ebs/builder.go | 3 ++-
 builder/amazon/instance/builder.go | 3 ++-
 5 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go
index a5403a3fd..16d5041f3 100644
--- a/builder/amazon/common/access_config.go
+++ b/builder/amazon/common/access_config.go
@@ -8,6 +8,7 @@ import (
 	"unicode"
 
 	"github.com/awslabs/aws-sdk-go/aws"
+	"github.com/awslabs/aws-sdk-go/aws/credentials"
 	"github.com/mitchellh/packer/template/interpolate"
 )
 
@@ -22,16 +23,16 @@ type AccessConfig struct {
 // Config returns a valid aws.Config object for access to AWS services, or
 // an error if the authentication and region couldn't be resolved
 func (c *AccessConfig) Config() (*aws.Config, error) {
-	credsProvider := aws.DetectCreds(c.AccessKey, c.SecretKey, c.Token)
-
-	creds, err := credsProvider.Credentials()
-	if err != nil {
-		return nil, err
-	}
-
-	c.AccessKey = creds.AccessKeyID
-	c.SecretKey = creds.SecretAccessKey
-	c.Token = creds.SessionToken
+	creds := credentials.NewChainCredentials([]credentials.Provider{
+		&credentials.StaticProvider{Value: credentials.Value{
+			AccessKeyID:     c.AccessKey,
+			SecretAccessKey: c.SecretKey,
+			SessionToken:    c.Token,
+		}},
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		&credentials.EC2RoleProvider{},
+	})
 
 	region, err := c.Region()
 	if err != nil {
@@ -40,7 +41,8 @@ func (c *AccessConfig) Config() (*aws.Config, error) {
 
 	return &aws.Config{
 		Region:      region,
-		Credentials: credsProvider,
+		Credentials: creds,
+		MaxRetries:  11,
 	}, nil
 }
 
diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go
index f86feff1c..9bd8344a3 100644
--- a/builder/amazon/common/block_device.go
+++ b/builder/amazon/common/block_device.go
@@ -1,8 +1,6 @@
 package common
 
 import (
-	"fmt"
-
 	"github.com/awslabs/aws-sdk-go/aws"
 	"github.com/awslabs/aws-sdk-go/service/ec2"
 	"github.com/mitchellh/packer/template/interpolate"
diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go
index 88ed1884f..33d67c655 100644
--- 
a/builder/amazon/common/step_ami_region_copy.go
+++ b/builder/amazon/common/step_ami_region_copy.go
@@ -5,7 +5,6 @@ import (
 	"sync"
 
-	"github.com/awslabs/aws-sdk-go/aws"
 	"github.com/awslabs/aws-sdk-go/service/ec2"
 
 	"github.com/mitchellh/multistep"
@@ -13,7 +12,8 @@ import (
 )
 
 type StepAMIRegionCopy struct {
-	Regions []string
+	AccessConfig *AccessConfig
+	Regions      []string
 }
 
 func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
@@ -37,7 +37,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
 		go func(region string) {
 			defer wg.Done()
-			id, err := amiRegionCopy(state, ec2conn.Config.Credentials, ami, region, ec2conn.Config.Region)
+			id, err := amiRegionCopy(state, s.AccessConfig, ami, region, ec2conn.Config.Region)
 
 			lock.Lock()
 			defer lock.Unlock()
@@ -69,15 +69,17 @@ func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) {
 // amiRegionCopy does a copy for the given AMI to the target region and
 // returns the resulting ID or error.
-func amiRegionCopy(state multistep.StateBag, auth aws.CredentialsProvider, imageId string,
+func amiRegionCopy(state multistep.StateBag, config *AccessConfig, imageId string,
 	target string, source string) (string, error) {
 
 	// Connect to the region where the AMI will be copied to
-	config := &aws.Config{
-		Credentials: auth,
-		Region:      target,
+	awsConfig, err := config.Config()
+	if err != nil {
+		return "", err
 	}
-	regionconn := ec2.New(config)
+	awsConfig.Region = target
+
+	regionconn := ec2.New(awsConfig)
 	resp, err := regionconn.CopyImage(&ec2.CopyImageInput{
 		SourceRegion:  &source,
 		SourceImageID: &imageId,
diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go
index a2d393bd5..e4f8eb0b2 100644
--- a/builder/amazon/ebs/builder.go
+++ b/builder/amazon/ebs/builder.go
@@ -121,7 +121,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		&stepModifyInstance{},
 		&stepCreateAMI{},
 		&awscommon.StepAMIRegionCopy{
-			Regions: b.config.AMIRegions,
+			AccessConfig: &b.config.AccessConfig,
+			Regions:      b.config.AMIRegions,
 		},
 		&awscommon.StepModifyAMIAttributes{
 			Description: b.config.AMIDescription,
diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index 66351aab0..bffd1a97a 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -203,7 +203,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		},
 		&StepRegisterAMI{},
 		&awscommon.StepAMIRegionCopy{
-			Regions: b.config.AMIRegions,
+			AccessConfig: &b.config.AccessConfig,
+			Regions:      b.config.AMIRegions,
 		},
 		&awscommon.StepModifyAMIAttributes{
 			Description: b.config.AMIDescription,
From 68bde0ba5675ad165f5d88dbaf4b8e309c4dec0a Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 28 May 2015 09:35:50 -0700
Subject: [PATCH 158/956] update CHANGELOG

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cbc5bfcb6..9cb767d08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,8 @@ BUG FIXES:
 * builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
 * builder/amazon: Retry finding created instance for eventual
 consistency. [GH-2129]
+ * builder/amazon: If no AZ is specified, use AZ chosen automatically by
+ AWS for spot instance. 
[GH-2017] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/docker: Fixed hang on prompt while copying script From b4ae3fbdb33cc6a419d25443275d66935c9c47b5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 09:37:21 -0700 Subject: [PATCH 159/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cb767d08..f85e84093 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ BUG FIXES: * builder/amazon: If no AZ is specified, use AZ chosen automatically by AWS for spot instance. [GH-2017] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] + * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/docker: Fixed hang on prompt while copying script * builder/qemu: Add `disk_discard` option [GH-2120] From b8a3a5ffad5c8a9c9060105613771181d931e0fa Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 09:38:47 -0700 Subject: [PATCH 160/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f85e84093..f5366b84e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ BUG FIXES: * builder/docker: Fixed hang on prompt while copying script * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support + * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * postprocessor/vagrant-cloud: Fixed failing on response * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call From 8cc2e579b44ae406cad289412e3e6745a1958b55 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 09:40:17 -0700 Subject: [PATCH 161/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5366b84e..3758515fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ BUG FIXES: * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] + * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * postprocessor/vagrant-cloud: Fixed failing on response * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call From 07734133a3b560c6f89cee92e88b6674a6033b7e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 09:44:33 -0700 Subject: [PATCH 162/956] builder/amazon: fix test failures --- builder/amazon/common/step_run_source_instance.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index ac6ce3ca5..fac240df7 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -111,7 +111,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi if price == 0 || current < price { price = current if s.AvailabilityZone == "" { - availabilityZone = history.AvailabilityZone + availabilityZone = *history.AvailabilityZone } } } @@ -167,7 +167,6 @@ func (s *StepRunSourceInstance) 
Run(state multistep.StateBag) multistep.StepActi ui.Message(fmt.Sprintf( "Requesting spot instance '%s' for: %s", s.InstanceType, spotPrice)) - D runSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{ SpotPrice: &spotPrice, LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ From 913d6f6996b5f8b9cc6204473feda0822339561b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 10:20:34 -0700 Subject: [PATCH 163/956] builder/amazon/common: fix compilation issues with latest libs --- builder/amazon/common/state.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 00a58be08..5f79f1968 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -9,7 +9,7 @@ import ( "strconv" "time" - "github.com/awslabs/aws-sdk-go/aws" + "github.com/awslabs/aws-sdk-go/aws/awserr" "github.com/awslabs/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" ) @@ -42,7 +42,7 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { ImageIDs: []*string{&imageId}, }) if err != nil { - if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidAMIID.NotFound" { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { @@ -73,7 +73,7 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc { InstanceIDs: []*string{i.InstanceID}, }) if err != nil { - if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { @@ -105,7 +105,7 @@ func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefre }) if err != nil { - if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidSpotInstanceRequestID.NotFound" { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" { // Set this to nil as if we didn't find anything. resp = nil } else if isTransientNetworkError(err) { From e0a9215e476d4b2d04641d500656f3d58d372dce Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 14:40:45 -0700 Subject: [PATCH 164/956] packer: test for environment variables interpolation --- packer/core.go | 41 +++++++++++++++++++++++++++-- packer/core_test.go | 33 +++++++++++++++++++++++ packer/test-fixtures/build-env.json | 10 +++++++ 3 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 packer/test-fixtures/build-env.json diff --git a/packer/core.go b/packer/core.go index e8329b821..678e44e08 100644 --- a/packer/core.go +++ b/packer/core.go @@ -66,12 +66,17 @@ func NewCore(c *CoreConfig) (*Core, error) { builds[v] = b } - return &Core{ + result := &Core{ components: c.Components, template: c.Template, variables: c.Variables, builds: builds, - }, nil + } + if err := result.init(); err != nil { + return nil, err + } + + return result, nil } // BuildNames returns the builds that are available in this configured core. 
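The init() hunk below is where environment variables enter variable defaults. A small sketch of the same mechanism, assuming only the EnableEnv flag and the env template function shown in this patch and its build-env.json test fixture (PACKER_TEST_ENV is just the fixture's example name):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/mitchellh/packer/template/interpolate"
    )

    func main() {
    	os.Setenv("PACKER_TEST_ENV", "test")

    	// EnableEnv is switched on only while variable defaults are
    	// rendered; later interpolation passes leave it off, so `env`
    	// stays unusable everywhere else in a template.
    	ctx := &interpolate.Context{EnableEnv: true}
    	def, err := interpolate.Render("{{env `PACKER_TEST_ENV`}}", ctx)
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(def) // "test"
    }

This preserves the rule the old ConfigTemplate enforced with templateDisableEnv (removed in the previous patch): environment variables may only feed user-variable defaults.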
@@ -228,3 +233,35 @@ func (c *Core) Validate() error { return err } + +func (c *Core) init() error { + if c.variables == nil { + c.variables = make(map[string]string) + } + + // Go through the variables and interpolate the environment variables + ctx := &interpolate.Context{EnableEnv: true} + for k, v := range c.template.Variables { + // Ignore variables that are required + if v.Required { + continue + } + + // Ignore variables that have a value + if _, ok := c.variables[k]; ok { + continue + } + + // Interpolate the default + def, err := interpolate.Render(v.Default, ctx) + if err != nil { + return fmt.Errorf( + "error interpolating default value for '%s': %s", + k, err) + } + + c.variables[k] = def + } + + return nil +} diff --git a/packer/core_test.go b/packer/core_test.go index 8cec16bae..09059909b 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -5,6 +5,7 @@ import ( "reflect" "testing" + configHelper "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/template" ) @@ -108,6 +109,38 @@ func TestCoreBuild_basicInterpolated(t *testing.T) { } } +func TestCoreBuild_env(t *testing.T) { + os.Setenv("PACKER_TEST_ENV", "test") + defer os.Setenv("PACKER_TEST_ENV", "") + + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-env.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + // Interpolate the config + var result map[string]interface{} + err = configHelper.Decode(&result, nil, b.PrepareConfig...) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["value"] != "test" { + t.Fatalf("bad: %#v", result) + } +} + func TestCoreBuild_nonExist(t *testing.T) { config := TestCoreConfig(t) testCoreTemplate(t, config, fixtureDir("build-basic.json")) diff --git a/packer/test-fixtures/build-env.json b/packer/test-fixtures/build-env.json new file mode 100644 index 000000000..4f2c9b118 --- /dev/null +++ b/packer/test-fixtures/build-env.json @@ -0,0 +1,10 @@ +{ + "variables": { + "var": "{{env `PACKER_TEST_ENV`}}" + }, + + "builders": [{ + "type": "test", + "value": "{{user `var`}}" + }] +} From 590997df449b3ac219fcc9bdc8a2f5bf3026213e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 14:42:53 -0700 Subject: [PATCH 165/956] packer: automatically validate when creating a core --- command/meta.go | 5 ----- packer/core.go | 9 ++++++--- packer/core_test.go | 8 ++------ packer/test-fixtures/build-prov-skip.json | 3 +++ 4 files changed, 11 insertions(+), 14 deletions(-) diff --git a/command/meta.go b/command/meta.go index 250f94f6b..e55aebf42 100644 --- a/command/meta.go +++ b/command/meta.go @@ -49,11 +49,6 @@ func (m *Meta) Core(tpl *template.Template) (*packer.Core, error) { return nil, fmt.Errorf("Error initializing core: %s", err) } - // Validate it - if err := core.Validate(); err != nil { - return nil, err - } - return core, nil } diff --git a/packer/core.go b/packer/core.go index 678e44e08..d4cb9fea5 100644 --- a/packer/core.go +++ b/packer/core.go @@ -72,6 +72,9 @@ func NewCore(c *CoreConfig) (*Core, error) { variables: c.Variables, builds: builds, } + if err := result.validate(); err != nil { + return nil, err + } if err := result.init(); err != nil { return nil, err } @@ -205,11 +208,11 @@ func (c *Core) Build(n string) (Build, error) { }, nil } -// Validate does a full validation of the template. 
+// validate does a full validation of the template. // -// This will automatically call template.Validate() in addition to doing +// This will automatically call template.validate() in addition to doing // richer semantic checks around variables and so on. -func (c *Core) Validate() error { +func (c *Core) validate() error { // First validate the template in general, we can't do anything else // unless the template itself is valid. if err := c.template.Validate(); err != nil { diff --git a/packer/core_test.go b/packer/core_test.go index 09059909b..9df7ca85c 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -376,15 +376,11 @@ func TestCoreValidate(t *testing.T) { t.Fatalf("err: %s\n\n%s", tc.File, err) } - core, err := NewCore(&CoreConfig{ + _, err = NewCore(&CoreConfig{ Template: tpl, Variables: tc.Vars, }) - if err != nil { - t.Fatalf("err: %s\n\n%s", tc.File, err) - } - - if err := core.Validate(); (err != nil) != tc.Err { + if (err != nil) != tc.Err { t.Fatalf("err: %s\n\n%s", tc.File, err) } } diff --git a/packer/test-fixtures/build-prov-skip.json b/packer/test-fixtures/build-prov-skip.json index bd9fa5072..24d597183 100644 --- a/packer/test-fixtures/build-prov-skip.json +++ b/packer/test-fixtures/build-prov-skip.json @@ -1,6 +1,9 @@ { "builders": [{ "type": "test" + }, { + "name": "foo", + "type": "test" }], "provisioners": [{ From da694a6ea8de571eddc51c7033f06dc184331ded Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 15:13:10 -0700 Subject: [PATCH 166/956] packer: try to fix flaky test --- packer/communicator.go | 4 ++-- packer/communicator_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packer/communicator.go b/packer/communicator.go index 448e4aacf..df7216d9c 100644 --- a/packer/communicator.go +++ b/packer/communicator.go @@ -115,16 +115,16 @@ func (r *RemoteCmd) StartWithUi(c Communicator, ui Ui) error { } // Create the channels we'll use for data - exitCh := make(chan int, 1) + exitCh := make(chan struct{}) stdoutCh := iochan.DelimReader(stdout_r, '\n') stderrCh := iochan.DelimReader(stderr_r, '\n') // Start the goroutine to watch for the exit go func() { + defer close(exitCh) defer stdout_w.Close() defer stderr_w.Close() r.Wait() - exitCh <- r.ExitStatus }() // Loop and get all our output diff --git a/packer/communicator_test.go b/packer/communicator_test.go index 7c88e2059..a4c3af3d2 100644 --- a/packer/communicator_test.go +++ b/packer/communicator_test.go @@ -33,7 +33,7 @@ func TestRemoteCmd_StartWithUi(t *testing.T) { rc.Wait() expected := strings.TrimSpace(data) - if uiOutput.String() != expected+"\n" { + if strings.TrimSpace(uiOutput.String()) != expected { t.Fatalf("bad output: '%s'", uiOutput.String()) } From 27e525e508968a61c18c6aa487ed4311fe6627d5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 28 May 2015 15:19:22 -0700 Subject: [PATCH 167/956] update CHANGELOG --- CHANGELOG.md | 9 +++++---- template/parse_test.go | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3758515fb..78a3d54af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,11 @@ FEATURES: IMPROVEMENTS: - * builder/openstack: Add rackconnect_wait for Rackspace customers to wait for - RackConnect data to appear - * buidler/openstakc: Add ssh_interface option for rackconnect for users that - have prohibitive firewalls + * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for + RackConnect data to appear + * buidler/openstakc: Add `ssh_interface` 
option for rackconnect for users that + have prohibitive firewalls + * command/push: Add `-name` flag for specifying name from CLI [GH-2042] BUG FIXES: diff --git a/template/parse_test.go b/template/parse_test.go index e99f35e51..78b4416ba 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -263,7 +263,7 @@ func TestParse(t *testing.T) { { "parse-push.json", &Template{ - Push: &Push{ + Push: Push{ Name: "foo", }, }, From 7bbb940896e07e3b3215845b9ad97e751f8e1100 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 08:37:41 -0700 Subject: [PATCH 168/956] update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ab069a213..de9328795 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,7 @@ http://www.packer.io/docs ## Developing Packer If you wish to work on Packer itself or any of its built-in providers, -you'll first need [Go](http://www.golang.org) installed (version 1.2+ is +you'll first need [Go](http://www.golang.org) installed (version 1.4+ is _required_). Make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). From 9c7b4b63c5c585bdc34ccd5c27e2bca14a8dd49e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 09:19:20 -0700 Subject: [PATCH 169/956] builder/docker: fix config parsing --- builder/docker/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/docker/config.go b/builder/docker/config.go index e261068df..4fc6f762a 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -30,7 +30,7 @@ type Config struct { } func NewConfig(raws ...interface{}) (*Config, []string, error) { - c := new(Config) + var c Config var md mapstructure.Metadata err := config.Decode(&c, &config.DecodeOpts{ @@ -83,5 +83,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { return nil, nil, errs } - return c, nil, nil + return &c, nil, nil } From 6570b53c4a5ed8ac79705c7bae6d404c77420208 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 09:29:59 -0700 Subject: [PATCH 170/956] builder/docker: use exec for v1.4+ --- builder/docker/builder.go | 9 +++++++- builder/docker/communicator.go | 39 +++++++++----------------------- builder/docker/driver.go | 5 ++++ builder/docker/driver_docker.go | 16 +++++++++++++ builder/docker/driver_mock.go | 10 ++++++++ builder/docker/step_provision.go | 9 ++++++++ 6 files changed, 59 insertions(+), 29 deletions(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 2dddbf94e..18ff73357 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -1,10 +1,11 @@ package docker import ( + "log" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/packer" - "log" ) const BuilderId = "packer.docker" @@ -31,6 +32,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } + version, err := driver.Version() + if err != nil { + return nil, err + } + log.Printf("[DEBUG] Docker version: %s", version.String()) + steps := []multistep.Step{ &StepTempDir{}, &StepPull{}, diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index bd1c87240..548c1b4d8 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -9,7 +9,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "sync" "syscall" @@ -24,36 +23,20 @@ type Communicator struct { ContainerId string HostDir 
string ContainerDir string + Version *version.Version lock sync.Mutex } -var dockerVersion *version.Version -var useDockerExec bool - -func init() { - execConstraint, _ := version.NewConstraint(">= 1.4.0") - - versionExtractor := regexp.MustCompile(version.VersionRegexpRaw) - dockerVersionOutput, err := exec.Command("docker", "-v").Output() - extractedVersion := versionExtractor.FindSubmatch(dockerVersionOutput) - - if extractedVersion != nil { - dockerVersionString := string(extractedVersion[0]) - dockerVersion, err = version.NewVersion(dockerVersionString) - } - - if dockerVersion == nil { - log.Printf("Could not determine docker version: %v", err) - log.Printf("Assuming no `exec` capability, using `attatch`") - useDockerExec = false - } else { - log.Printf("Docker version detected as %s", dockerVersion) - useDockerExec = execConstraint.Check(dockerVersion) - } -} - func (c *Communicator) Start(remote *packer.RemoteCmd) error { + // Determine if we're using docker exec or not + useExec := false + execConstraint, err := version.NewConstraint(">= 1.4.0") + if err != nil { + return err + } + useExec = execConstraint.Check(c.Version) + // Create a temporary file to store the output. Because of a bug in // Docker, sometimes all the output doesn't properly show up. This // file will capture ALL of the output, and we'll read that. @@ -69,7 +52,7 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { exitCodePath := outputFile.Name() + "-exit" var cmd *exec.Cmd - if useDockerExec { + if useExec { cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") } else { cmd = exec.Command("docker", "attach", c.ContainerId) @@ -150,7 +133,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error return os.MkdirAll(hostpath, info.Mode()) } - if info.Mode() & os.ModeSymlink == os.ModeSymlink { + if info.Mode()&os.ModeSymlink == os.ModeSymlink { dest, err := os.Readlink(path) if err != nil { diff --git a/builder/docker/driver.go b/builder/docker/driver.go index 85b87b1d0..d9cb94f9b 100644 --- a/builder/docker/driver.go +++ b/builder/docker/driver.go @@ -2,6 +2,8 @@ package docker import ( "io" + + "github.com/hashicorp/go-version" ) // Driver is the interface that has to be implemented to communicate with @@ -48,6 +50,9 @@ type Driver interface { // Verify verifies that the driver can run Verify() error + + // Version reads the Docker version + Version() (*version.Version, error) } // ContainerConfig is the configuration used to start a container. 
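The version gate driving this new interface method is worth seeing in isolation. A minimal standalone sketch using hashicorp/go-version the same way the communicator now does; the detected version string is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

func main() {
	// Parse a detected Docker version and test it against the same
	// ">= 1.4.0" constraint the communicator uses to choose between
	// `docker exec` and `docker attach`.
	detected, err := version.NewVersion("1.5.0") // hypothetical detected version
	if err != nil {
		log.Fatal(err)
	}

	execConstraint, err := version.NewConstraint(">= 1.4.0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(execConstraint.Check(detected)) // true: exec is usable
}
```

Moving this check onto the driver (and caching the parsed version on the Communicator) also removes the package-level init() side effect above, which shelled out to `docker -v` on import even when the Docker builder was never used.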
diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index 038aa046e..017c33ee0 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -7,9 +7,11 @@ import ( "log" "os" "os/exec" + "regexp" "strings" "sync" + "github.com/hashicorp/go-version" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" ) @@ -263,3 +265,17 @@ func (d *DockerDriver) Verify() error { return nil } + +func (d *DockerDriver) Version() (*version.Version, error) { + output, err := exec.Command("docker", "-v").Output() + if err != nil { + return nil, err + } + + match := regexp.MustCompile(version.VersionRegexpRaw).FindSubmatch(output) + if match == nil { + return nil, fmt.Errorf("unknown version: %s", output) + } + + return version.NewVersion(string(match[0])) +} diff --git a/builder/docker/driver_mock.go b/builder/docker/driver_mock.go index 549e79611..390a7e308 100644 --- a/builder/docker/driver_mock.go +++ b/builder/docker/driver_mock.go @@ -2,6 +2,8 @@ package docker import ( "io" + + "github.com/hashicorp/go-version" ) // MockDriver is a driver implementation that can be used for tests. @@ -63,6 +65,9 @@ type MockDriver struct { StopCalled bool StopID string VerifyCalled bool + + VersionCalled bool + VersionVersion string } func (d *MockDriver) Commit(id string) (string, error) { @@ -162,3 +167,8 @@ func (d *MockDriver) Verify() error { d.VerifyCalled = true return d.VerifyError } + +func (d *MockDriver) Version() (*version.Version, error) { + d.VersionCalled = true + return version.NewVersion(d.VersionVersion) +} diff --git a/builder/docker/step_provision.go b/builder/docker/step_provision.go index 726653763..d9852ae2b 100644 --- a/builder/docker/step_provision.go +++ b/builder/docker/step_provision.go @@ -9,14 +9,23 @@ type StepProvision struct{} func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { containerId := state.Get("container_id").(string) + driver := state.Get("driver").(Driver) tempDir := state.Get("temp_dir").(string) + // Get the version so we can pass it to the communicator + version, err := driver.Version() + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + // Create the communicator that talks to Docker via various // os/exec tricks. 
comm := &Communicator{ ContainerId: containerId, HostDir: tempDir, ContainerDir: "/packer-files", + Version: version, } prov := common.StepProvision{Comm: comm} From ce275969e464145e674bd64e478c8c23fe398a62 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 09:37:27 -0700 Subject: [PATCH 171/956] builder/docker: don't attempt to read artifact if cancelled --- builder/docker/builder.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 18ff73357..96a79b02d 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -77,8 +77,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, rawErr.(error) } - var artifact packer.Artifact + // If it was cancelled, then just return + if _, ok := state.GetOk(multistep.StateCancelled); ok { + return nil, nil + } + // No errors, must've worked + var artifact packer.Artifact if b.config.Commit { artifact = &ImportArtifact{ IdValue: state.Get("image_id").(string), @@ -88,6 +93,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe } else { artifact = &ExportArtifact{path: b.config.ExportPath} } + return artifact, nil } From 31ac2652d64ddd364f0bd3ae2652559fc02caad6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:08:41 -0700 Subject: [PATCH 172/956] bulder/docker: canExec as sep function --- builder/docker/communicator.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 548c1b4d8..6fedf2769 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -29,14 +29,6 @@ type Communicator struct { } func (c *Communicator) Start(remote *packer.RemoteCmd) error { - // Determine if we're using docker exec or not - useExec := false - execConstraint, err := version.NewConstraint(">= 1.4.0") - if err != nil { - return err - } - useExec = execConstraint.Check(c.Version) - // Create a temporary file to store the output. Because of a bug in // Docker, sometimes all the output doesn't properly show up. This // file will capture ALL of the output, and we'll read that. 
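One note on the cancellation guard patch 171 added just above: it relies on the runner recording a well-known key in the state bag. A small hypothetical sketch using only the multistep API already imported throughout this tree:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/multistep"
)

func main() {
	// The step runner stores multistep.StateCancelled in the state bag
	// when a build is interrupted; the builder then returns no artifact
	// instead of reading keys that were never populated, which is the
	// crash class patch 171 fixes.
	state := new(multistep.BasicStateBag)
	state.Put(multistep.StateCancelled, true) // simulate an interrupted run

	if _, ok := state.GetOk(multistep.StateCancelled); ok {
		fmt.Println("build cancelled; skipping artifact lookup")
		return
	}
}
```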
@@ -52,7 +44,7 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { exitCodePath := outputFile.Name() + "-exit" var cmd *exec.Cmd - if useExec { + if c.canExec() { cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") } else { cmd = exec.Command("docker", "attach", c.ContainerId) @@ -202,6 +194,15 @@ func (c *Communicator) Download(src string, dst io.Writer) error { panic("not implemented") } +// canExec tells us whether `docker exec` is supported +func (c *Communicator) canExec() bool { + execConstraint, err := version.NewConstraint(">= 1.4.0") + if err != nil { + panic(err) + } + return execConstraint.Check(c.Version) +} + // Runs the given command and blocks until completion func (c *Communicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin_w io.WriteCloser, outputFile *os.File, exitCodePath string) { // For Docker, remote communication must be serialized since it From 62aa9ada786231cd539c54788f1fe7ed51701a20 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:14:48 -0700 Subject: [PATCH 173/956] update CHANGELOG --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 78a3d54af..2112fb531 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,9 @@ BUG FIXES: * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/docker: Fixed hang on prompt while copying script + * builder/docker: Use `docker exec` for newer versions of Docker for + running scripts [GH-1993] + * builder/docker: Fix crash that could occur at certain timed ctrl-c [GH-1838] * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] From 76e9045bc058e9668a0bc7ad2de83dd6a61d5ea4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:26:07 -0700 Subject: [PATCH 174/956] post-processor/atlas: find common prefix on Windows [GH-1874] --- post-processor/atlas/util.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/post-processor/atlas/util.go b/post-processor/atlas/util.go index 1c0bfa27f..0a26d05a4 100644 --- a/post-processor/atlas/util.go +++ b/post-processor/atlas/util.go @@ -2,6 +2,7 @@ package atlas import ( "math" + "path/filepath" "strings" ) @@ -26,12 +27,12 @@ func longestCommonPrefix(vs []string) string { // short string, which itself must contain the prefix. 
for i := len(shortest); i > 0; i-- { // We only care about prefixes with path seps - if shortest[i-1] != '/' { + if shortest[i-1] != filepath.Separator { continue } bad := false - prefix := shortest[0 : i] + prefix := shortest[0:i] for _, v := range vs { if !strings.HasPrefix(v, prefix) { bad = true From 2eff9c53579668dd5b6724553473c993cae34b2d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:26:41 -0700 Subject: [PATCH 175/956] update CHANGELOG --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2112fb531..8d9480f72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,8 @@ BUG FIXES: * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] - * postprocessor/vagrant-cloud: Fixed failing on response + * post-processor/atlas: Find common archive prefix for Windows [GH-1874] + * post-processor/vagrant-cloud: Fixed failing on response * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call From f259e7352aa1992dd800221db60be4866a2449e6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:30:56 -0700 Subject: [PATCH 176/956] packer/plugin: fix crash case, nil function call [GH-2098] --- CHANGELOG.md | 1 + packer/plugin/post_processor.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d9480f72..42f2e4f3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ IMPROVEMENTS: BUG FIXES: + * core: Fix potential panic for post-processor plugin exits [GH-2098] * builder/amazon: Remove deprecated ec2-upload-bundle paramger [GH-1931] * builder/amazon: Retry finding created instance for eventual consistency. [GH-2129] diff --git a/packer/plugin/post_processor.go b/packer/plugin/post_processor.go index 72398020f..483140564 100644 --- a/packer/plugin/post_processor.go +++ b/packer/plugin/post_processor.go @@ -29,7 +29,7 @@ func (c *cmdPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer. } func (c *cmdPostProcessor) checkExit(p interface{}, cb func()) { - if c.client.Exited() { + if c.client.Exited() && cb != nil { cb() } else if p != nil && !Killed { log.Panic(p) From 58b430d7efcefb6745596233d94a31bac6068ded Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 11:34:22 -0700 Subject: [PATCH 177/956] website: update docs for provider key [GH-2099] --- .../docs/post-processors/atlas.html.markdown | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index e0886ad89..dc8f7d042 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -9,11 +9,11 @@ description: |- Type: `atlas` -The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves artifacts, allowing you to version and distribute them in a simple way. +The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves artifacts, allowing you to version and distribute them in a simple way. 
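Returning briefly to patch 174: the hunk above shows only the inner loop, so here is a hedged reconstruction of the whole helper showing where the filepath.Separator comparison fits. The signature is simplified relative to the real function in post-processor/atlas/util.go, and the inputs are illustrative:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// longestCommonPrefix scans the shortest input right to left and returns the
// longest separator-terminated prefix shared by every string. Comparing with
// filepath.Separator rather than a literal '/' is what makes this work for
// archives built on Windows.
func longestCommonPrefix(shortest string, vs []string) string {
	for i := len(shortest); i > 0; i-- {
		// We only care about prefixes that end on a path separator.
		if shortest[i-1] != filepath.Separator {
			continue
		}

		prefix := shortest[0:i]
		bad := false
		for _, v := range vs {
			if !strings.HasPrefix(v, prefix) {
				bad = true
				break
			}
		}
		if !bad {
			return prefix
		}
	}
	return ""
}

func main() {
	vs := []string{"builds/a/one.box", "builds/a/two.box"}
	fmt.Println(longestCommonPrefix(vs[0], vs)) // "builds/a/" on Unix-like hosts
}
```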
## Workflow -To take full advantage of Packer and Atlas, it's important to understand the +To take full advantage of Packer and Atlas, it's important to understand the workflow for creating artifacts with Packer and storing them in Atlas using this post-processor. The goal of the Atlas post-processor is to streamline the distribution of public or private artifacts by hosting them in a central location in Atlas. Here is an example workflow: @@ -25,17 +25,17 @@ Here is an example workflow: ## Configuration -The configuration allows you to specify and access the artifact in Atlas. +The configuration allows you to specify and access the artifact in Atlas. ### Required: * `token` (string) - Your access token for the Atlas API. - This can be generated on your [tokens page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can export your Atlas token as an environmental variable and remove it from the configuration. + This can be generated on your [tokens page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can export your Atlas token as an environmental variable and remove it from the configuration. * `artifact` (string) - The shorthand tag for your artifact that maps to - Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must - have access to the organization, hashicorp in this example, in order to add an artifact to - the organization in Atlas. + Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must + have access to the organization, hashicorp in this example, in order to add an artifact to + the organization in Atlas. * `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `aws.ami`. This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. @@ -48,7 +48,9 @@ The configuration allows you to specify and access the artifact in Atlas. is useful if you're using Atlas Enterprise in your own network. Defaults to `https://atlas.hashicorp.com/api/v1`. -* `metadata` (map) - Send metadata about the artifact. +* `metadata` (map) - Send metadata about the artifact. If the artifact + type is "vagrant.box", you must specify a "provider" metadata about + what provider to use. 
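For `vagrant.box` artifacts specifically, that requirement amounts to a single key. A tiny hypothetical illustration of the metadata map; the provider name is made up for the example:

```go
package main

import "fmt"

func main() {
	// When artifact_type is "vagrant.box", Atlas needs a "provider" key in
	// the metadata naming the Vagrant provider the box targets.
	metadata := map[string]interface{}{
		"provider": "virtualbox", // assumed provider name, for illustration
	}

	if _, ok := metadata["provider"]; !ok {
		panic("vagrant.box artifacts require a provider metadata key")
	}
	fmt.Println(metadata)
}
```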
### Example Configuration From e728c09301efc9949beabb40a01542b2739b9c8f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 13:30:52 -0700 Subject: [PATCH 178/956] post-processor/vagrant-cloud: in error, don't delete version [GH-2014] --- .../vagrant-cloud/step_create_version.go | 29 +------------------ 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/post-processor/vagrant-cloud/step_create_version.go b/post-processor/vagrant-cloud/step_create_version.go index 738614152..58c3051a7 100644 --- a/post-processor/vagrant-cloud/step_create_version.go +++ b/post-processor/vagrant-cloud/step_create_version.go @@ -55,31 +55,4 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionContinue } -func (s *stepCreateVersion) Cleanup(state multistep.StateBag) { - client := state.Get("client").(*VagrantCloudClient) - ui := state.Get("ui").(packer.Ui) - box := state.Get("box").(*Box) - version := state.Get("version").(*Version) - - _, cancelled := state.GetOk(multistep.StateCancelled) - _, halted := state.GetOk(multistep.StateHalted) - - // Return if we didn't cancel or halt, and thus need - // no cleanup - if !cancelled && !halted { - return - } - - path := fmt.Sprintf("box/%s/version/%v", box.Tag, version.Version) - - ui.Say("Cleaning up version") - ui.Message(fmt.Sprintf("Deleting version: %s", version.Version)) - - // No need for resp from the cleanup DELETE - _, err := client.Delete(path) - - if err != nil { - ui.Error(fmt.Sprintf("Error destroying version: %s", err)) - } - -} +func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {} From 7ce76d2890e47f7ddc5a28582fbb0c2577d9247f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 13:31:19 -0700 Subject: [PATCH 179/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42f2e4f3e..783f8c49e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ BUG FIXES: * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * post-processor/atlas: Find common archive prefix for Windows [GH-1874] * post-processor/vagrant-cloud: Fixed failing on response + * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call From 2752e51e0917ed88702b5b37c224436ce95e2934 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 13:55:59 -0700 Subject: [PATCH 180/956] template/interpolate: add template_path --- template/interpolate/funcs.go | 23 +++++++++++++++++------ template/interpolate/funcs_test.go | 27 +++++++++++++++++++++++++++ template/interpolate/i.go | 4 ++++ 3 files changed, 48 insertions(+), 6 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index ca51fea81..abe84873f 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -23,12 +23,13 @@ func init() { // Funcs are the interpolation funcs that are available within interpolations. 
var FuncGens = map[string]FuncGenerator{ - "env": funcGenEnv, - "isotime": funcGenIsotime, - "pwd": funcGenPwd, - "timestamp": funcGenTimestamp, - "uuid": funcGenUuid, - "user": funcGenUser, + "env": funcGenEnv, + "isotime": funcGenIsotime, + "pwd": funcGenPwd, + "template_path": funcGenTemplatePath, + "timestamp": funcGenTimestamp, + "uuid": funcGenUuid, + "user": funcGenUser, "upper": funcGenPrimitive(strings.ToUpper), "lower": funcGenPrimitive(strings.ToLower), @@ -92,6 +93,16 @@ func funcGenPwd(ctx *Context) interface{} { } } +func funcGenTemplatePath(ctx *Context) interface{} { + return func() (string, error) { + if ctx == nil || ctx.TemplatePath == "" { + return "", errors.New("template path not available") + } + + return ctx.TemplatePath, nil + } +} + func funcGenTimestamp(ctx *Context) interface{} { return func() string { return strconv.FormatInt(InitTime.Unix(), 10) diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index aad05d376..d6ef3e758 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -116,6 +116,33 @@ func TestFuncPwd(t *testing.T) { } } +func TestFuncTemplatePath(t *testing.T) { + cases := []struct { + Input string + Output string + }{ + { + `{{template_path}}`, + `foo`, + }, + } + + ctx := &Context{ + TemplatePath: "foo", + } + for _, tc := range cases { + i := &I{Value: tc.Input} + result, err := i.Render(ctx) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err) + } + + if result != tc.Output { + t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result) + } + } +} + func TestFuncTimestamp(t *testing.T) { expected := strconv.FormatInt(InitTime.Unix(), 10) diff --git a/template/interpolate/i.go b/template/interpolate/i.go index c0cd13f85..d5f7c8413 100644 --- a/template/interpolate/i.go +++ b/template/interpolate/i.go @@ -14,6 +14,10 @@ type Context struct { // Funcs are extra functions available in the template Funcs map[string]interface{} + // TemplatePath is the path to the template that this is being + // rendered within. + TemplatePath string + // UserVariables is the mapping of user variables that the // "user" function reads from. 
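Before the remaining Context fields, the new function is easiest to see end to end. A minimal standalone sketch mirroring the TestFuncTemplatePath case above (the path value is illustrative); note that patches 182 and 183 below refine this into `template_dir`, which returns the absolute directory of the path instead:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/packer/template/interpolate"
)

func main() {
	// With TemplatePath set on the context, {{template_path}} renders to
	// it; rendering without a path set returns the "template path not
	// available" error from the branch above.
	ctx := &interpolate.Context{TemplatePath: "builds/ubuntu.json"}

	out, err := interpolate.Render("{{template_path}}", ctx)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(out) // "builds/ubuntu.json"
}
```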
UserVariables map[string]string From 2b9e52e74378224105de3883003f4edd8b64262a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:05:13 -0700 Subject: [PATCH 181/956] template: stores the path --- template/parse.go | 8 +++++++- template/parse_test.go | 4 ++++ template/template.go | 4 ++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/template/parse.go b/template/parse.go index c19c4b4ff..a7b187f37 100644 --- a/template/parse.go +++ b/template/parse.go @@ -310,5 +310,11 @@ func ParseFile(path string) (*Template, error) { } defer f.Close() - return Parse(f) + tpl, err := Parse(f) + if err != nil { + return nil, err + } + + tpl.Path = path + return tpl, nil } diff --git a/template/parse_test.go b/template/parse_test.go index 78b4416ba..46bb75ad8 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -272,11 +272,15 @@ func TestParse(t *testing.T) { } for _, tc := range cases { + path := fixtureDir(tc.File) tpl, err := ParseFile(fixtureDir(tc.File)) if (err != nil) != tc.Err { t.Fatalf("err: %s", err) } + if tc.Result != nil { + tc.Result.Path = path + } if tpl != nil { tpl.RawContents = nil } diff --git a/template/template.go b/template/template.go index bee4e510e..a22e78b95 100644 --- a/template/template.go +++ b/template/template.go @@ -11,6 +11,10 @@ import ( // Template represents the parsed template that is used to configure // Packer builds. type Template struct { + // Path is the path to the template. This will be blank if Parse is + // used, but will be automatically populated by ParseFile. + Path string + Description string MinVersion string From 31d6dcb656898e5a8f7cd2a216804e1b222b4906 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:06:17 -0700 Subject: [PATCH 182/956] template: template path is the directory --- template/interpolate/funcs.go | 3 ++- template/interpolate/funcs_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index abe84873f..ba3048823 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "strconv" "strings" "text/template" @@ -99,7 +100,7 @@ func funcGenTemplatePath(ctx *Context) interface{} { return "", errors.New("template path not available") } - return ctx.TemplatePath, nil + return filepath.Dir(ctx.TemplatePath), nil } } diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index d6ef3e758..9c7c639f3 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -128,7 +128,7 @@ func TestFuncTemplatePath(t *testing.T) { } ctx := &Context{ - TemplatePath: "foo", + TemplatePath: "foo/bar", } for _, tc := range cases { i := &I{Value: tc.Input} From 639e63fd7c070302af11424f4957768d9b438b9d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:29:32 -0700 Subject: [PATCH 183/956] packer: test for template path --- helper/config/decode.go | 4 ++- packer/build.go | 5 ++++ packer/core.go | 43 +++++++++++++++++------------- packer/core_test.go | 30 +++++++++++++++++++++ template/interpolate/funcs.go | 23 +++++++++------- template/interpolate/funcs_test.go | 10 ++++--- 6 files changed, 84 insertions(+), 31 deletions(-) diff --git a/helper/config/decode.go b/helper/config/decode.go index 73e470d27..0148ad27c 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -104,7 +104,8 @@ func Decode(target interface{}, config 
*DecodeOpts, raws ...interface{}) error { // detecting things like user variables from the raw configuration params. func DetectContext(raws ...interface{}) (*interpolate.Context, error) { var s struct { - Vars map[string]string `mapstructure:"packer_user_variables"` + TemplatePath string `mapstructure:"packer_template_path"` + Vars map[string]string `mapstructure:"packer_user_variables"` } for _, r := range raws { @@ -114,6 +115,7 @@ func DetectContext(raws ...interface{}) (*interpolate.Context, error) { } return &interpolate.Context{ + TemplatePath: s.TemplatePath, UserVariables: s.Vars, }, nil } diff --git a/packer/build.go b/packer/build.go index 987e4d339..757ef89eb 100644 --- a/packer/build.go +++ b/packer/build.go @@ -24,6 +24,9 @@ const ( // force build is enabled. ForceConfigKey = "packer_force" + // TemplatePathKey is the path to the template that configured this build + TemplatePathKey = "packer_template_path" + // This key contains a map[string]string of the user variables for // template processing. UserVariablesConfigKey = "packer_user_variables" @@ -78,6 +81,7 @@ type coreBuild struct { hooks map[string][]Hook postProcessors [][]coreBuildPostProcessor provisioners []coreBuildProvisioner + templatePath string variables map[string]string debug bool @@ -125,6 +129,7 @@ func (b *coreBuild) Prepare() (warn []string, err error) { BuilderTypeConfigKey: b.builderType, DebugConfigKey: b.debug, ForceConfigKey: b.force, + TemplatePathKey: b.templatePath, UserVariablesConfigKey: b.variables, } diff --git a/packer/core.go b/packer/core.go index d4cb9fea5..0f7886772 100644 --- a/packer/core.go +++ b/packer/core.go @@ -50,27 +50,10 @@ type ComponentFinder struct { // NewCore creates a new Core. func NewCore(c *CoreConfig) (*Core, error) { - // Go through and interpolate all the build names. We shuld be able - // to do this at this point with the variables. - builds := make(map[string]*template.Builder) - for _, b := range c.Template.Builders { - v, err := interpolate.Render(b.Name, &interpolate.Context{ - UserVariables: c.Variables, - }) - if err != nil { - return nil, fmt.Errorf( - "Error interpolating builder '%s': %s", - b.Name, err) - } - - builds[v] = b - } - result := &Core{ components: c.Components, template: c.Template, variables: c.Variables, - builds: builds, } if err := result.validate(); err != nil { return nil, err @@ -79,6 +62,20 @@ func NewCore(c *CoreConfig) (*Core, error) { return nil, err } + // Go through and interpolate all the build names. We shuld be able + // to do this at this point with the variables. 
+ result.builds = make(map[string]*template.Builder) + for _, b := range c.Template.Builders { + v, err := interpolate.Render(b.Name, result.context()) + if err != nil { + return nil, fmt.Errorf( + "Error interpolating builder '%s': %s", + b.Name, err) + } + + result.builds[v] = b + } + return result, nil } @@ -204,6 +201,7 @@ func (c *Core) Build(n string) (Build, error) { builderType: configBuilder.Type, postProcessors: postProcessors, provisioners: provisioners, + templatePath: c.template.Path, variables: c.variables, }, nil } @@ -243,7 +241,9 @@ func (c *Core) init() error { } // Go through the variables and interpolate the environment variables - ctx := &interpolate.Context{EnableEnv: true} + ctx := c.context() + ctx.EnableEnv = true + ctx.UserVariables = nil for k, v := range c.template.Variables { // Ignore variables that are required if v.Required { @@ -268,3 +268,10 @@ func (c *Core) init() error { return nil } + +func (c *Core) context() *interpolate.Context { + return &interpolate.Context{ + TemplatePath: c.template.Path, + UserVariables: c.variables, + } +} diff --git a/packer/core_test.go b/packer/core_test.go index 9df7ca85c..cc03574ed 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -2,6 +2,7 @@ package packer import ( "os" + "path/filepath" "reflect" "testing" @@ -338,6 +339,35 @@ func TestCoreBuild_postProcess(t *testing.T) { } } +func TestCoreBuild_templatePath(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-template-path.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + expected, _ := filepath.Abs("./test-fixtures") + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + // Interpolate the config + var result map[string]interface{} + err = configHelper.Decode(&result, nil, b.PrepareConfig...) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["value"] != expected { + t.Fatalf("bad: %#v", result) + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index ba3048823..6092707b8 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -24,13 +24,13 @@ func init() { // Funcs are the interpolation funcs that are available within interpolations. 
var FuncGens = map[string]FuncGenerator{ - "env": funcGenEnv, - "isotime": funcGenIsotime, - "pwd": funcGenPwd, - "template_path": funcGenTemplatePath, - "timestamp": funcGenTimestamp, - "uuid": funcGenUuid, - "user": funcGenUser, + "env": funcGenEnv, + "isotime": funcGenIsotime, + "pwd": funcGenPwd, + "template_dir": funcGenTemplateDir, + "timestamp": funcGenTimestamp, + "uuid": funcGenUuid, + "user": funcGenUser, "upper": funcGenPrimitive(strings.ToUpper), "lower": funcGenPrimitive(strings.ToLower), @@ -94,13 +94,18 @@ func funcGenPwd(ctx *Context) interface{} { } } -func funcGenTemplatePath(ctx *Context) interface{} { +func funcGenTemplateDir(ctx *Context) interface{} { return func() (string, error) { if ctx == nil || ctx.TemplatePath == "" { return "", errors.New("template path not available") } - return filepath.Dir(ctx.TemplatePath), nil + path, err := filepath.Abs(filepath.Dir(ctx.TemplatePath)) + if err != nil { + return "", err + } + + return path, nil } } diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go index 9c7c639f3..ff877f13e 100644 --- a/template/interpolate/funcs_test.go +++ b/template/interpolate/funcs_test.go @@ -2,6 +2,7 @@ package interpolate import ( "os" + "path/filepath" "strconv" "testing" "time" @@ -117,18 +118,21 @@ func TestFuncPwd(t *testing.T) { } func TestFuncTemplatePath(t *testing.T) { + path := "foo/bar" + expected, _ := filepath.Abs(filepath.Dir(path)) + cases := []struct { Input string Output string }{ { - `{{template_path}}`, - `foo`, + `{{template_dir}}`, + expected, }, } ctx := &Context{ - TemplatePath: "foo/bar", + TemplatePath: path, } for _, tc := range cases { i := &I{Value: tc.Input} From 4620f33dd013860009b5febf2d0f67eae4966926 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:29:40 -0700 Subject: [PATCH 184/956] packer: test fixture --- packer/test-fixtures/build-template-path.json | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 packer/test-fixtures/build-template-path.json diff --git a/packer/test-fixtures/build-template-path.json b/packer/test-fixtures/build-template-path.json new file mode 100644 index 000000000..dcda0818a --- /dev/null +++ b/packer/test-fixtures/build-template-path.json @@ -0,0 +1,6 @@ +{ + "builders": [{ + "type": "test", + "value": "{{template_dir}}" + }] +} From 36c6d37f692607fd511274113514c0492f14c1e4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:30:36 -0700 Subject: [PATCH 185/956] website: Update docs for template_dir --- .../docs/templates/configuration-templates.html.markdown | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index f5ae4a362..f940ebcc1 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -55,10 +55,11 @@ While some configuration settings have local variables specific to only that configuration, a set of functions are available globally for use in _any string_ in Packer templates. These are listed below for reference. -* `lower` - Lowercases the string. -* `pwd` - The working directory while executing Packer. * `isotime [FORMAT]` - UTC time, which can be [formatted](http://golang.org/pkg/time/#example_Time_Format). See more examples below. +* `lower` - Lowercases the string. +* `pwd` - The working directory while executing Packer. 
+* `template_dir` - The directory to the template for the build. * `timestamp` - The current Unix timestamp in UTC. * `uuid` - Returns a random UUID. * `upper` - Uppercases the string. From 048c764e05888dbf668164723a90f1c1ff96d148 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 14:34:45 -0700 Subject: [PATCH 186/956] packer: fix failing tests --- packer/build_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/packer/build_test.go b/packer/build_test.go index 4f93e03a5..b183fb95a 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -32,6 +32,7 @@ func testDefaultPackerConfig() map[string]interface{} { BuilderTypeConfigKey: "foo", DebugConfigKey: false, ForceConfigKey: false, + TemplatePathKey: "", UserVariablesConfigKey: make(map[string]string), } } From ebdd0d991a1d952971e1544f89561758e9b503d9 Mon Sep 17 00:00:00 2001 From: Andrew Bayer Date: Fri, 29 May 2015 14:50:11 -0700 Subject: [PATCH 187/956] Adds support for using the internal IP rather than NAT IP in GCE --- builder/googlecompute/config.go | 1 + builder/googlecompute/config_test.go | 15 ++++++ builder/googlecompute/driver.go | 3 ++ builder/googlecompute/driver_gce.go | 17 ++++++- builder/googlecompute/driver_mock.go | 11 +++++ builder/googlecompute/ssh.go | 2 +- builder/googlecompute/step_instance_info.go | 48 +++++++++++++------ .../googlecompute/step_instance_info_test.go | 40 ++++++++++++++++ 8 files changed, 120 insertions(+), 17 deletions(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 743e4745c..e0e6624fb 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -36,6 +36,7 @@ type Config struct { RawSSHTimeout string `mapstructure:"ssh_timeout"` RawStateTimeout string `mapstructure:"state_timeout"` Tags []string `mapstructure:"tags"` + UseInternalIP bool `mapstructure:"use_internal_ip"` Zone string `mapstructure:"zone"` account accountFile diff --git a/builder/googlecompute/config_test.go b/builder/googlecompute/config_test.go index 4a7a1ed67..c28c35a0f 100644 --- a/builder/googlecompute/config_test.go +++ b/builder/googlecompute/config_test.go @@ -116,6 +116,21 @@ func TestConfigPrepare(t *testing.T) { "5s", false, }, + { + "use_internal_ip", + nil, + false, + }, + { + "use_internal_ip", + false, + false, + }, + { + "use_internal_ip", + "SO VERY BAD", + true, + }, } for _, tc := range cases { diff --git a/builder/googlecompute/driver.go b/builder/googlecompute/driver.go index 6f035c562..be697fe6b 100644 --- a/builder/googlecompute/driver.go +++ b/builder/googlecompute/driver.go @@ -24,6 +24,9 @@ type Driver interface { // GetNatIP gets the NAT IP address for the instance. GetNatIP(zone, name string) (string, error) + // GetInternalIP gets the GCE-internal IP address for the instance. + GetInternalIP(zone, name string) (string, error) + // RunInstance takes the given config and launches an instance. 
RunInstance(*InstanceConfig) (<-chan error, error) diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go index b9ed4693e..f52ee6321 100644 --- a/builder/googlecompute/driver_gce.go +++ b/builder/googlecompute/driver_gce.go @@ -157,7 +157,6 @@ func (d *driverGCE) GetNatIP(zone, name string) (string, error) { if ni.AccessConfigs == nil { continue } - for _, ac := range ni.AccessConfigs { if ac.NatIP != "" { return ac.NatIP, nil @@ -168,6 +167,22 @@ func (d *driverGCE) GetNatIP(zone, name string) (string, error) { return "", nil } +func (d *driverGCE) GetInternalIP(zone, name string) (string, error) { + instance, err := d.service.Instances.Get(d.projectId, zone, name).Do() + if err != nil { + return "", err + } + + for _, ni := range instance.NetworkInterfaces { + if ni.NetworkIP == "" { + continue + } + return ni.NetworkIP, nil + } + + return "", nil +} + func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) { // Get the zone d.ui.Message(fmt.Sprintf("Loading zone: %s", c.Zone)) diff --git a/builder/googlecompute/driver_mock.go b/builder/googlecompute/driver_mock.go index 1a9c03ae9..196aa1d81 100644 --- a/builder/googlecompute/driver_mock.go +++ b/builder/googlecompute/driver_mock.go @@ -30,6 +30,11 @@ type DriverMock struct { GetNatIPResult string GetNatIPErr error + GetInternalIPZone string + GetInternalIPName string + GetInternalIPResult string + GetInternalIPErr error + RunInstanceConfig *InstanceConfig RunInstanceErrCh <-chan error RunInstanceErr error @@ -108,6 +113,12 @@ func (d *DriverMock) GetNatIP(zone, name string) (string, error) { return d.GetNatIPResult, d.GetNatIPErr } +func (d *DriverMock) GetInternalIP(zone, name string) (string, error) { + d.GetInternalIPZone = zone + d.GetInternalIPName = name + return d.GetInternalIPResult, d.GetInternalIPErr +} + func (d *DriverMock) RunInstance(c *InstanceConfig) (<-chan error, error) { d.RunInstanceConfig = c diff --git a/builder/googlecompute/ssh.go b/builder/googlecompute/ssh.go index a4e0151f4..e04029e44 100644 --- a/builder/googlecompute/ssh.go +++ b/builder/googlecompute/ssh.go @@ -1,9 +1,9 @@ package googlecompute import ( - "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" ) // sshAddress returns the ssh address. 
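Between the driver methods above and the step that consumes them below, the essential difference is where each address lives on the instance resource: the GCE-internal address is the NIC's own NetworkIP, while the public address returned by GetNatIP hangs off an AccessConfig attached to the NIC. A hedged sketch; the compute import path and the instance literal are assumptions:

```go
package main

import (
	"fmt"

	compute "code.google.com/p/google-api-go-client/compute/v1" // assumed import path for this era
)

// internalIP mirrors the loop in GetInternalIP above: take the first
// network interface that carries a NetworkIP.
func internalIP(instance *compute.Instance) string {
	for _, ni := range instance.NetworkInterfaces {
		if ni.NetworkIP != "" {
			return ni.NetworkIP
		}
	}
	return ""
}

func main() {
	instance := &compute.Instance{ // hypothetical instance resource
		NetworkInterfaces: []*compute.NetworkInterface{
			{NetworkIP: "10.240.0.2"},
		},
	}
	fmt.Println(internalIP(instance)) // "10.240.0.2"
}
```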
diff --git a/builder/googlecompute/step_instance_info.go b/builder/googlecompute/step_instance_info.go index b79e7c042..92f382f06 100644 --- a/builder/googlecompute/step_instance_info.go +++ b/builder/googlecompute/step_instance_info.go @@ -40,23 +40,41 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - ip, err := driver.GetNatIP(config.Zone, instanceName) - if err != nil { - err := fmt.Errorf("Error retrieving instance nat ip address: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - if s.Debug { - if ip != "" { - ui.Message(fmt.Sprintf("Public IP: %s", ip)) + if config.UseInternalIP { + ip, err := driver.GetInternalIP(config.Zone, instanceName) + if err != nil { + err := fmt.Errorf("Error retrieving instance internal ip address: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt } - } - ui.Message(fmt.Sprintf("IP: %s", ip)) - state.Put("instance_ip", ip) - return multistep.ActionContinue + if s.Debug { + if ip != "" { + ui.Message(fmt.Sprintf("Internal IP: %s", ip)) + } + } + ui.Message(fmt.Sprintf("IP: %s", ip)) + state.Put("instance_ip", ip) + return multistep.ActionContinue + } else { + ip, err := driver.GetNatIP(config.Zone, instanceName) + if err != nil { + err := fmt.Errorf("Error retrieving instance nat ip address: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if s.Debug { + if ip != "" { + ui.Message(fmt.Sprintf("Public IP: %s", ip)) + } + } + ui.Message(fmt.Sprintf("IP: %s", ip)) + state.Put("instance_ip", ip) + return multistep.ActionContinue + } } // Cleanup. diff --git a/builder/googlecompute/step_instance_info_test.go b/builder/googlecompute/step_instance_info_test.go index 8566ce722..5b6c01d0a 100644 --- a/builder/googlecompute/step_instance_info_test.go +++ b/builder/googlecompute/step_instance_info_test.go @@ -49,6 +49,46 @@ func TestStepInstanceInfo(t *testing.T) { } } +func TestStepInstanceInfo_InternalIP(t *testing.T) { + state := testState(t) + step := new(StepInstanceInfo) + defer step.Cleanup(state) + + state.Put("instance_name", "foo") + + config := state.Get("config").(*Config) + config.UseInternalIP = true + driver := state.Get("driver").(*DriverMock) + driver.GetNatIPResult = "1.2.3.4" + driver.GetInternalIPResult = "5.6.7.8" + + // run the step + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + + // Verify state + if driver.WaitForInstanceState != "RUNNING" { + t.Fatalf("bad: %#v", driver.WaitForInstanceState) + } + if driver.WaitForInstanceZone != config.Zone { + t.Fatalf("bad: %#v", driver.WaitForInstanceZone) + } + if driver.WaitForInstanceName != "foo" { + t.Fatalf("bad: %#v", driver.WaitForInstanceName) + } + + ipRaw, ok := state.GetOk("instance_ip") + if !ok { + t.Fatal("should have ip") + } + if ip, ok := ipRaw.(string); !ok { + t.Fatal("ip is not a string") + } else if ip != "5.6.7.8" { + t.Fatalf("bad ip: %s", ip) + } +} + func TestStepInstanceInfo_getNatIPError(t *testing.T) { state := testState(t) step := new(StepInstanceInfo) From 3946346614f5d6898ede3b31c01049d6220db537 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 15:16:08 -0700 Subject: [PATCH 188/956] update CHANGELOG --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 783f8c49e..98a0d6c3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ 
FEATURES: + * **New config function: `template_dir`**: The directory to the template + being built. This should be used for template-relative paths. [GH-54] + IMPROVEMENTS: * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for From 579264bb5b8060f0d1fa110e1683914850109914 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 15:35:55 -0700 Subject: [PATCH 189/956] command/push: interpolate --- command/command_test.go | 1 + command/push.go | 42 +++++++++++++------ command/push_test.go | 29 +++++++++++++ command/test-fixtures/push-vars/template.json | 11 +++++ packer/core.go | 19 +++++---- template/interpolate/render_test.go | 41 ++++++++++++++++++ 6 files changed, 122 insertions(+), 21 deletions(-) create mode 100644 command/test-fixtures/push-vars/template.json diff --git a/command/command_test.go b/command/command_test.go index 49e0c7276..126897810 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -28,6 +28,7 @@ func testMeta(t *testing.T) Meta { var out, err bytes.Buffer return Meta{ + CoreConfig: packer.TestCoreConfig(t), Ui: &packer.BasicUi{ Writer: &out, ErrorWriter: &err, diff --git a/command/push.go b/command/push.go index 297de804a..95e50597f 100644 --- a/command/push.go +++ b/command/push.go @@ -1,7 +1,6 @@ package command import ( - "flag" "fmt" "io" "os" @@ -12,6 +11,7 @@ import ( "github.com/hashicorp/atlas-go/archive" "github.com/hashicorp/atlas-go/v1" "github.com/mitchellh/packer/template" + "github.com/mitchellh/packer/template/interpolate" ) // archiveTemplateEntry is the name the template always takes within the slug. @@ -37,7 +37,7 @@ func (c *PushCommand) Run(args []string) int { var name string var create bool - f := flag.NewFlagSet("push", flag.ContinueOnError) + f := c.Meta.FlagSet("push", FlagSetVars) f.Usage = func() { c.Ui.Error(c.Help()) } f.StringVar(&token, "token", "", "token") f.StringVar(&message, "m", "", "message") @@ -67,9 +67,23 @@ func (c *PushCommand) Run(args []string) int { return 1 } + // Get the core + core, err := c.Meta.Core(tpl) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + push := tpl.Push + pushRaw, err := interpolate.RenderInterface(&push, core.Context()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + push = *pushRaw.(*template.Push) + // If we didn't pass name from the CLI, use the template if name == "" { - name = tpl.Push.Name + name = push.Name } // Validate some things @@ -83,14 +97,14 @@ func (c *PushCommand) Run(args []string) int { // Determine our token if token == "" { - token = tpl.Push.Token + token = push.Token } // Build our client defer func() { c.client = nil }() c.client = atlas.DefaultClient() - if tpl.Push.Address != "" { - c.client, err = atlas.NewClient(tpl.Push.Address) + if push.Address != "" { + c.client, err = atlas.NewClient(push.Address) if err != nil { c.Ui.Error(fmt.Sprintf( "Error setting up API client: %s", err)) @@ -103,9 +117,9 @@ func (c *PushCommand) Run(args []string) int { // Build the archiving options var opts archive.ArchiveOpts - opts.Include = tpl.Push.Include - opts.Exclude = tpl.Push.Exclude - opts.VCS = tpl.Push.VCS + opts.Include = push.Include + opts.Exclude = push.Exclude + opts.VCS = push.VCS opts.Extra = map[string]string{ archiveTemplateEntry: args[0], } @@ -120,7 +134,7 @@ func (c *PushCommand) Run(args []string) int { // 3.) BaseDir is relative, so we use the path relative to the directory // of the template. 
// - path := tpl.Push.BaseDir + path := push.BaseDir if path == "" || !filepath.IsAbs(path) { tplPath, err := filepath.Abs(args[0]) if err != nil { @@ -150,7 +164,7 @@ func (c *PushCommand) Run(args []string) int { // Build the upload options var uploadOpts uploadOpts - uploadOpts.Slug = tpl.Push.Name + uploadOpts.Slug = push.Name uploadOpts.Builds = make(map[string]*uploadBuildInfo) for _, b := range tpl.Builders { info := &uploadBuildInfo{Type: b.Type} @@ -229,7 +243,7 @@ func (c *PushCommand) Run(args []string) int { return 1 } - c.Ui.Say(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name)) + c.Ui.Say(fmt.Sprintf("Push successful to '%s'", push.Name)) return 0 } @@ -257,6 +271,10 @@ Options: "username/name". -token= The access token to use to when uploading + + -var 'key=value' Variable for templates, can be used multiple times. + + -var-file=path JSON file containing user variables. ` return strings.TrimSpace(helpText) diff --git a/command/push_test.go b/command/push_test.go index 322637049..f1b7fd306 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -190,6 +190,35 @@ func TestPush_uploadErrorCh(t *testing.T) { } } +func TestPush_vars(t *testing.T) { + var actualOpts *uploadOpts + uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) { + actualOpts = opts + + doneCh := make(chan struct{}) + close(doneCh) + return doneCh, nil, nil + } + + c := &PushCommand{ + Meta: testMeta(t), + uploadFn: uploadFn, + } + + args := []string{ + "-var", "name=foo/bar", + filepath.Join(testFixture("push-vars"), "template.json"), + } + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + expected := "foo/bar" + if actualOpts.Slug != expected { + t.Fatalf("bad: %#v", actualOpts.Slug) + } +} + func testArchive(t *testing.T, r io.Reader) []string { // Finish the archiving process in-memory var buf bytes.Buffer diff --git a/command/test-fixtures/push-vars/template.json b/command/test-fixtures/push-vars/template.json new file mode 100644 index 000000000..3085062ae --- /dev/null +++ b/command/test-fixtures/push-vars/template.json @@ -0,0 +1,11 @@ +{ + "variables": { + "name": null + }, + + "builders": [{"type": "dummy"}], + + "push": { + "name": "{{user `name`}}" + } +} diff --git a/packer/core.go b/packer/core.go index 0f7886772..bdf9cb65a 100644 --- a/packer/core.go +++ b/packer/core.go @@ -66,7 +66,7 @@ func NewCore(c *CoreConfig) (*Core, error) { // to do this at this point with the variables. result.builds = make(map[string]*template.Builder) for _, b := range c.Template.Builders { - v, err := interpolate.Render(b.Name, result.context()) + v, err := interpolate.Render(b.Name, result.Context()) if err != nil { return nil, fmt.Errorf( "Error interpolating builder '%s': %s", @@ -206,6 +206,14 @@ func (c *Core) Build(n string) (Build, error) { }, nil } +// Context returns an interpolation context. +func (c *Core) Context() *interpolate.Context { + return &interpolate.Context{ + TemplatePath: c.template.Path, + UserVariables: c.variables, + } +} + // validate does a full validation of the template. 
// // This will automatically call template.validate() in addition to doing @@ -241,7 +249,7 @@ func (c *Core) init() error { } // Go through the variables and interpolate the environment variables - ctx := c.context() + ctx := c.Context() ctx.EnableEnv = true ctx.UserVariables = nil for k, v := range c.template.Variables { @@ -268,10 +276,3 @@ func (c *Core) init() error { return nil } - -func (c *Core) context() *interpolate.Context { - return &interpolate.Context{ - TemplatePath: c.template.Path, - UserVariables: c.variables, - } -} diff --git a/template/interpolate/render_test.go b/template/interpolate/render_test.go index 60a88f6fb..7156c344c 100644 --- a/template/interpolate/render_test.go +++ b/template/interpolate/render_test.go @@ -5,6 +5,47 @@ import ( "testing" ) +func TestRenderInterface(t *testing.T) { + type Test struct { + Foo string + } + + cases := map[string]struct { + Input interface{} + Output interface{} + }{ + "basic": { + map[string]interface{}{ + "foo": "{{upper `bar`}}", + }, + map[string]interface{}{ + "foo": "BAR", + }, + }, + + "struct": { + &Test{ + Foo: "{{upper `bar`}}", + }, + &Test{ + Foo: "BAR", + }, + }, + } + + ctx := &Context{} + for k, tc := range cases { + actual, err := RenderInterface(tc.Input, ctx) + if err != nil { + t.Fatalf("err: %s\n\n%s", k, err) + } + + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("err: %s\n\n%#v\n\n%#v", k, actual, tc.Output) + } + } +} + func TestRenderMap(t *testing.T) { cases := map[string]struct { Input interface{} From 1b775cca2e818cf5afb13e28279724035361774b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 15:41:52 -0700 Subject: [PATCH 190/956] packer: core interpolates Push --- command/push.go | 9 +------- packer/core.go | 28 ++++++++++++++---------- packer/core_test.go | 34 +++++++++++++++++++++++++++++ packer/test-fixtures/push-vars.json | 11 ++++++++++ 4 files changed, 63 insertions(+), 19 deletions(-) create mode 100644 packer/test-fixtures/push-vars.json diff --git a/command/push.go b/command/push.go index 95e50597f..6c04917dd 100644 --- a/command/push.go +++ b/command/push.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/atlas-go/archive" "github.com/hashicorp/atlas-go/v1" "github.com/mitchellh/packer/template" - "github.com/mitchellh/packer/template/interpolate" ) // archiveTemplateEntry is the name the template always takes within the slug. @@ -73,13 +72,7 @@ func (c *PushCommand) Run(args []string) int { c.Ui.Error(err.Error()) return 1 } - push := tpl.Push - pushRaw, err := interpolate.RenderInterface(&push, core.Context()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - push = *pushRaw.(*template.Push) + push := core.Template.Push // If we didn't pass name from the CLI, use the template if name == "" { diff --git a/packer/core.go b/packer/core.go index bdf9cb65a..3bc5d295e 100644 --- a/packer/core.go +++ b/packer/core.go @@ -12,8 +12,9 @@ import ( // Core is the main executor of Packer. If Packer is being used as a // library, this is the struct you'll want to instantiate to get anything done. type Core struct { + Template *template.Template + components ComponentFinder - template *template.Template variables map[string]string builds map[string]*template.Builder } @@ -51,8 +52,8 @@ type ComponentFinder struct { // NewCore creates a new Core. 
func NewCore(c *CoreConfig) (*Core, error) { result := &Core{ + Template: c.Template, components: c.Components, - template: c.Template, variables: c.Variables, } if err := result.validate(); err != nil { @@ -112,8 +113,8 @@ func (c *Core) Build(n string) (Build, error) { rawName := configBuilder.Name // Setup the provisioners for this build - provisioners := make([]coreBuildProvisioner, 0, len(c.template.Provisioners)) - for _, rawP := range c.template.Provisioners { + provisioners := make([]coreBuildProvisioner, 0, len(c.Template.Provisioners)) + for _, rawP := range c.Template.Provisioners { // If we're skipping this, then ignore it if rawP.Skip(rawName) { continue @@ -155,8 +156,8 @@ func (c *Core) Build(n string) (Build, error) { } // Setup the post-processors - postProcessors := make([][]coreBuildPostProcessor, 0, len(c.template.PostProcessors)) - for _, rawPs := range c.template.PostProcessors { + postProcessors := make([][]coreBuildPostProcessor, 0, len(c.Template.PostProcessors)) + for _, rawPs := range c.Template.PostProcessors { current := make([]coreBuildPostProcessor, 0, len(rawPs)) for _, rawP := range rawPs { // If we skip, ignore @@ -201,7 +202,7 @@ func (c *Core) Build(n string) (Build, error) { builderType: configBuilder.Type, postProcessors: postProcessors, provisioners: provisioners, - templatePath: c.template.Path, + templatePath: c.Template.Path, variables: c.variables, }, nil } @@ -209,7 +210,7 @@ func (c *Core) Build(n string) (Build, error) { // Context returns an interpolation context. func (c *Core) Context() *interpolate.Context { return &interpolate.Context{ - TemplatePath: c.template.Path, + TemplatePath: c.Template.Path, UserVariables: c.variables, } } @@ -221,13 +222,13 @@ func (c *Core) Context() *interpolate.Context { func (c *Core) validate() error { // First validate the template in general, we can't do anything else // unless the template itself is valid. 
- if err := c.template.Validate(); err != nil { + if err := c.Template.Validate(); err != nil { return err } // Validate variables are set var err error - for n, v := range c.template.Variables { + for n, v := range c.Template.Variables { if v.Required { if _, ok := c.variables[n]; !ok { err = multierror.Append(err, fmt.Errorf( @@ -252,7 +253,7 @@ func (c *Core) init() error { ctx := c.Context() ctx.EnableEnv = true ctx.UserVariables = nil - for k, v := range c.template.Variables { + for k, v := range c.Template.Variables { // Ignore variables that are required if v.Required { continue @@ -274,5 +275,10 @@ func (c *Core) init() error { c.variables[k] = def } + // Interpolate the push configuration + if _, err := interpolate.RenderInterface(&c.Template.Push, c.Context()); err != nil { + return fmt.Errorf("Error interpolating 'push': %s", err) + } + return nil } diff --git a/packer/core_test.go b/packer/core_test.go index cc03574ed..f11242d0c 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -368,6 +368,40 @@ func TestCoreBuild_templatePath(t *testing.T) { } } +func TestCore_pushInterpolate(t *testing.T) { + cases := []struct { + File string + Vars map[string]string + Result template.Push + }{ + { + "push-vars.json", + map[string]string{"foo": "bar"}, + template.Push{Name: "bar"}, + }, + } + + for _, tc := range cases { + tpl, err := template.ParseFile(fixtureDir(tc.File)) + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + core, err := NewCore(&CoreConfig{ + Template: tpl, + Variables: tc.Vars, + }) + if err != nil { + t.Fatalf("err: %s\n\n%s", tc.File, err) + } + + expected := core.Template.Push + if !reflect.DeepEqual(expected, tc.Result) { + t.Fatalf("err: %s\n\n%#v", tc.File, expected) + } + } +} + func TestCoreValidate(t *testing.T) { cases := []struct { File string diff --git a/packer/test-fixtures/push-vars.json b/packer/test-fixtures/push-vars.json new file mode 100644 index 000000000..b5f518100 --- /dev/null +++ b/packer/test-fixtures/push-vars.json @@ -0,0 +1,11 @@ +{ + "variables": { + "foo": null + }, + + "builders": [{"type": "test"}], + + "push": { + "name": "{{user `foo`}}" + } +} From b0c63ce88aeab4509222672466c38b059fc88812 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 16:03:02 -0700 Subject: [PATCH 191/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98a0d6c3a..8b0e62bb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ IMPROVEMENTS: * builder/openstack: Add `ssh_interface` option for rackconnect for users that have prohibitive firewalls * command/push: Add `-name` flag for specifying name from CLI [GH-2042] + * command/push: Push configuration in templates supports variables [GH-1861] BUG FIXES: From f1aad91f262a85f9f1630c13738a9084c8b6dc6c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 16:05:36 -0700 Subject: [PATCH 192/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b0e62bb9..27d400170 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ IMPROVEMENTS: have prohibitive firewalls * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] + * post-processor/docker-tag: Support `force` option [GH-2055] BUG FIXES: From c49fe672b3c38f6062c28f6477e4313927daac68 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015
16:09:37 -0700 Subject: [PATCH 193/956] command/validate: bail if can't initialize build [GH-2139] --- command/validate.go | 1 + 1 file changed, 1 insertion(+) diff --git a/command/validate.go b/command/validate.go index 5d7e16c5d..c8f333fcf 100644 --- a/command/validate.go +++ b/command/validate.go @@ -60,6 +60,7 @@ func (c *ValidateCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf( "Failed to initialize build '%s': %s", n, err)) + return 1 } builds = append(builds, b) From 911a868ac57fae7937b76fbaacce3927b6fdb4b3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 16:10:03 -0700 Subject: [PATCH 194/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27d400170..d935fc24d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ BUG FIXES: * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] + * command/validate: don't crash for invalid builds [GH-2139] * post-processor/atlas: Find common archive prefix for Windows [GH-1874] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] From 819986d19fdfc803b6abcf7b845e7bd3c1418faa Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 16:24:29 -0700 Subject: [PATCH 195/956] builder/docker: validate export path is not a dir [GH-2105] --- CHANGELOG.md | 1 + builder/docker/config.go | 8 ++++++++ builder/docker/config_test.go | 13 +++++++++++++ 3 files changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d935fc24d..d3f7473ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ BUG FIXES: * builder/docker: Use `docker exec` for newer versions of Docker for running scripts [GH-1993] * builder/docker: Fix crash that could occur at certain timed ctrl-c [GH-1838] + * builder/docker: validate that `export_path` is not a directory [GH-2105] * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] diff --git a/builder/docker/config.go b/builder/docker/config.go index 4fc6f762a..024b915af 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -2,6 +2,7 @@ package docker import ( "fmt" + "os" "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" @@ -79,6 +80,13 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { fmt.Errorf("both commit and export_path cannot be set")) } + if c.ExportPath != "" { + if fi, err := os.Stat(c.ExportPath); err == nil && fi.IsDir() { + errs = packer.MultiErrorAppend(errs, fmt.Errorf( + "export_path must be a file, not a directory")) + } + } + if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } diff --git a/builder/docker/config_test.go b/builder/docker/config_test.go index 3f535b9e9..907222b4f 100644 --- a/builder/docker/config_test.go +++ b/builder/docker/config_test.go @@ -1,6 +1,8 @@ package docker import ( + "io/ioutil" + "os" "testing" ) @@ -42,6 +44,12 @@ func testConfigOk(t *testing.T, warns []string, err error) { } } func TestConfigPrepare_exportPath(t *testing.T) { + td, err := ioutil.TempDir("", "packer") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(td) + raw := testConfig() // No export path
raw["export_path"] = "good" _, warns, errs = NewConfig(raw) testConfigOk(t, warns, errs) + + // Bad export path (directory) + raw["export_path"] = td + _, warns, errs = NewConfig(raw) + testConfigErr(t, warns, errs) } func TestConfigPrepare_exportPathAndCommit(t *testing.T) { From 6109c511156b071a94c360cddfa19743c3e20fbf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 16:35:41 -0700 Subject: [PATCH 196/956] post-processor/docker-tag: fix failing test --- post-processor/docker-tag/post-processor_test.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/post-processor/docker-tag/post-processor_test.go b/post-processor/docker-tag/post-processor_test.go index 1501a3e15..1c0221c1f 100644 --- a/post-processor/docker-tag/post-processor_test.go +++ b/post-processor/docker-tag/post-processor_test.go @@ -76,11 +76,10 @@ func TestPostProcessor_PostProcess(t *testing.T) { func TestPostProcessor_PostProcess_Force(t *testing.T) { driver := &docker.MockDriver{} p := &PostProcessor{Driver: driver} - config := testConfig() - config["force"] = true - _, err := common.DecodeConfig(&p.config, config) - if err != nil { - t.Fatalf("err %s", err) + c := testConfig() + c["force"] = true + if err := p.Configure(c); err != nil { + t.Fatalf("err: %s", err) } artifact := &packer.MockArtifact{ From 3f636ef7f358f217d4fe07acb9583b6ba9287317 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 17:04:11 -0700 Subject: [PATCH 197/956] vmware/vmx: clarify messaging for source path required error --- builder/vmware/vmx/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go index bf21f2e54..9010d4b58 100644 --- a/builder/vmware/vmx/config.go +++ b/builder/vmware/vmx/config.go @@ -63,7 +63,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs = packer.MultiErrorAppend(errs, c.VMXConfig.Prepare(&c.ctx)...) if c.SourcePath == "" { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) + errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is blank, but is required")) } else { if _, err := os.Stat(c.SourcePath); err != nil { errs = packer.MultiErrorAppend(errs, From edf3415c6e2248c8096f9c16cbad5022b73d6f21 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 29 May 2015 17:10:14 -0700 Subject: [PATCH 198/956] builder/amazon: delete physical private key for debug mode [GH-1801] --- CHANGELOG.md | 2 ++ builder/amazon/common/step_key_pair.go | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3f7473ed..d42ea0bdb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,8 @@ BUG FIXES: consistency. [GH-2129] * builder/amazon: If no AZ is specified, use AZ chosen automatically by AWS for spot instance. [GH-2017] + * builder/amazon: Private key file (only available in debug mode) + is deleted on cleanup. [GH-1801] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. 
[GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 5082d7b26..0b41d6bea 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -91,10 +91,19 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) + // Remove the keypair ui.Say("Deleting temporary keypair...") _, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.keyName}) if err != nil { ui.Error(fmt.Sprintf( "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) } + + // Also remove the physical key if we're debugging. + if s.Debug { + if err := os.Remove(s.DebugKeyPath); err != nil { + ui.Error(fmt.Sprintf( + "Error removing debug key '%s': %s", s.DebugKeyPath, err)) + } + } } From 295f7a2ebd3cecfb022d1546b582f5f309d7ee81 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 1 Jun 2015 15:40:43 -0700 Subject: [PATCH 199/956] Fail the build when govet returns non-zero exit code --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index d440fd501..0574cbb5c 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,7 @@ vet: echo ""; \ echo "Vet found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ + exit 1; \ fi .PHONY: bin default generate test testacc updatedeps vet From 33ca8b7fb54be30cf7c6c8610aad34ed79fdb322 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Wed, 3 Jun 2015 17:13:52 -0400 Subject: [PATCH 200/956] Migrate to new AWS repo --- builder/amazon/chroot/builder.go | 2 +- builder/amazon/chroot/step_attach_volume.go | 2 +- builder/amazon/chroot/step_check_root_device.go | 2 +- builder/amazon/chroot/step_create_volume.go | 2 +- builder/amazon/chroot/step_instance_info.go | 2 +- builder/amazon/chroot/step_mount_device.go | 2 +- builder/amazon/chroot/step_register_ami.go | 4 ++-- builder/amazon/chroot/step_register_ami_test.go | 4 ++-- builder/amazon/chroot/step_snapshot.go | 2 +- builder/amazon/common/access_config.go | 4 ++-- builder/amazon/common/artifact.go | 4 ++-- builder/amazon/common/block_device.go | 4 ++-- builder/amazon/common/block_device_test.go | 4 ++-- builder/amazon/common/ssh.go | 2 +- builder/amazon/common/state.go | 4 ++-- builder/amazon/common/step_ami_region_copy.go | 2 +- builder/amazon/common/step_create_tags.go | 4 ++-- builder/amazon/common/step_key_pair.go | 2 +- builder/amazon/common/step_modify_ami_attributes.go | 4 ++-- builder/amazon/common/step_run_source_instance.go | 4 ++-- builder/amazon/common/step_security_group.go | 4 ++-- builder/amazon/common/step_source_ami_info.go | 2 +- builder/amazon/ebs/builder.go | 2 +- builder/amazon/ebs/step_create_ami.go | 2 +- builder/amazon/ebs/step_modify_instance.go | 2 +- builder/amazon/ebs/step_stop_instance.go | 2 +- builder/amazon/instance/builder.go | 2 +- builder/amazon/instance/step_bundle_volume.go | 2 +- builder/amazon/instance/step_register_ami.go | 2 +- 29 files changed, 40 insertions(+), 40 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index f5ba97f40..66650b01e 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -9,7 +9,7 @@ import ( "log" "runtime" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" 
"github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" diff --git a/builder/amazon/chroot/step_attach_volume.go b/builder/amazon/chroot/step_attach_volume.go index b305046f1..c450d3b02 100644 --- a/builder/amazon/chroot/step_attach_volume.go +++ b/builder/amazon/chroot/step_attach_volume.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/chroot/step_check_root_device.go b/builder/amazon/chroot/step_check_root_device.go index 49a83178a..29df2933d 100644 --- a/builder/amazon/chroot/step_check_root_device.go +++ b/builder/amazon/chroot/step_check_root_device.go @@ -3,7 +3,7 @@ package chroot import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index d1d12d65b..2d7205e8d 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/chroot/step_instance_info.go b/builder/amazon/chroot/step_instance_info.go index 23191c54d..ee8dbb3e6 100644 --- a/builder/amazon/chroot/step_instance_info.go +++ b/builder/amazon/chroot/step_instance_info.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index e1b42ec13..0e3cdad52 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 25b87592a..ee2cf48e4 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -3,8 +3,8 @@ package chroot import ( "fmt" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/chroot/step_register_ami_test.go b/builder/amazon/chroot/step_register_ami_test.go index 9d44ba684..ac473b302 100644 --- a/builder/amazon/chroot/step_register_ami_test.go +++ b/builder/amazon/chroot/step_register_ami_test.go @@ -3,8 +3,8 @@ package chroot import ( "testing" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/ec2" ) func testImage() ec2.Image { diff --git a/builder/amazon/chroot/step_snapshot.go b/builder/amazon/chroot/step_snapshot.go index e798a3a3e..b98e10861 100644 --- a/builder/amazon/chroot/step_snapshot.go +++ b/builder/amazon/chroot/step_snapshot.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index 16d5041f3..4479e0181 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -7,8 +7,8 @@ import ( "strings" "unicode" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/mitchellh/packer/template/interpolate" ) diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 89f3d9fa2..7b2537072 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -6,8 +6,8 @@ import ( "sort" "strings" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 9bd8344a3..14add3276 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -1,8 +1,8 @@ package common import ( - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/template/interpolate" ) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index a4c1dbb79..c4f644f67 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -4,8 +4,8 @@ import ( "reflect" "testing" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" ) func TestBlockDevice(t *testing.T) { diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index 811215572..302a90beb 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "golang.org/x/crypto/ssh" ) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 5f79f1968..390568cbf 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -9,8 +9,8 @@ import ( "strconv" "time" - "github.com/awslabs/aws-sdk-go/aws/awserr" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" ) diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 33d67c655..591dcbdf0 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -5,7 +5,7 @@ import ( "sync" - "github.com/awslabs/aws-sdk-go/service/ec2" + 
"github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index fc17458ee..a3346c76e 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -3,8 +3,8 @@ package common import ( "fmt" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 0b41d6bea..e15e0c218 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -6,7 +6,7 @@ import ( "os" "runtime" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index 0628109b5..98bcfaf8c 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -3,8 +3,8 @@ package common import ( "fmt" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index fac240df7..c678ef54d 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -7,8 +7,8 @@ import ( "strconv" "time" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go index 4bca41828..d870fd1c3 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -5,8 +5,8 @@ import ( "log" "time" - "github.com/awslabs/aws-sdk-go/aws" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/common/step_source_ami_info.go b/builder/amazon/common/step_source_ami_info.go index b0c941fda..5ab36e5da 100644 --- a/builder/amazon/common/step_source_ami_info.go +++ b/builder/amazon/common/step_source_ami_info.go @@ -3,7 +3,7 @@ package common import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index e4f8eb0b2..274e3cd6d 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -9,7 +9,7 @@ import ( "fmt" "log" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" 
diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go index 4fa601dfd..dff7d88b0 100644 --- a/builder/amazon/ebs/step_create_ami.go +++ b/builder/amazon/ebs/step_create_ami.go @@ -3,7 +3,7 @@ package ebs import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/ebs/step_modify_instance.go b/builder/amazon/ebs/step_modify_instance.go index 0fa766326..d7b30e42c 100644 --- a/builder/amazon/ebs/step_modify_instance.go +++ b/builder/amazon/ebs/step_modify_instance.go @@ -3,7 +3,7 @@ package ebs import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go index c01de8fdc..9f7cb8029 100644 --- a/builder/amazon/ebs/step_stop_instance.go +++ b/builder/amazon/ebs/step_stop_instance.go @@ -3,7 +3,7 @@ package ebs import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index e6cb24efd..4808a4031 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" diff --git a/builder/amazon/instance/step_bundle_volume.go b/builder/amazon/instance/step_bundle_volume.go index 433c1ca6e..fe1092741 100644 --- a/builder/amazon/instance/step_bundle_volume.go +++ b/builder/amazon/instance/step_bundle_volume.go @@ -3,7 +3,7 @@ package instance import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" diff --git a/builder/amazon/instance/step_register_ami.go b/builder/amazon/instance/step_register_ami.go index 349c7f856..c3150449d 100644 --- a/builder/amazon/instance/step_register_ami.go +++ b/builder/amazon/instance/step_register_ami.go @@ -3,7 +3,7 @@ package instance import ( "fmt" - "github.com/awslabs/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/packer" From 408250ec7682a35eb5905cd49fc05e937901886b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 3 Jun 2015 17:13:26 -0700 Subject: [PATCH 201/956] Wrap output in if statement to catch zero values from select --- packer/communicator.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/packer/communicator.go b/packer/communicator.go index df7216d9c..7bfc17a68 100644 --- a/packer/communicator.go +++ b/packer/communicator.go @@ -1,11 +1,12 @@ package packer import ( - "github.com/mitchellh/iochan" "io" "os" "strings" "sync" + + "github.com/mitchellh/iochan" ) // RemoteCmd represents a remote command being prepared or run. 
@@ -132,9 +133,13 @@ OutputLoop: for { select { case output := <-stderrCh: - ui.Message(r.cleanOutputLine(output)) + if output != "" { + ui.Message(r.cleanOutputLine(output)) + } case output := <-stdoutCh: - ui.Message(r.cleanOutputLine(output)) + if output != "" { + ui.Message(r.cleanOutputLine(output)) + } case <-exitCh: break OutputLoop } From a7eeb6a6a796690af4c84d0ee6cda0abc48fbc8f Mon Sep 17 00:00:00 2001 From: Brandon Heller Date: Thu, 4 Jun 2015 02:43:16 -0700 Subject: [PATCH 202/956] vmware/iso: support hierarchical output directories When providing a hierarchical output_directory value like 'transient/jenkins-slave', the VM would fail to build in the CreateDisk step. The properly created output directory would not match the location provided to CreateDisk, since datastorePath() did not properly split such paths. Now this case works; tested hierarchical and singular output_directory values. --- builder/vmware/iso/driver_esx5.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 2b9332760..0d6fd6531 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -311,8 +311,8 @@ func (d *ESX5Driver) String() string { } func (d *ESX5Driver) datastorePath(path string) string { - baseDir := filepath.Base(filepath.Dir(path)) - return filepath.ToSlash(filepath.Join("/vmfs/volumes", d.Datastore, baseDir, filepath.Base(path))) + dirPath := filepath.Dir(path) + return filepath.ToSlash(filepath.Join("/vmfs/volumes", d.Datastore, dirPath, filepath.Base(path))) } func (d *ESX5Driver) cachePath(path string) string { From a5c476c6fef5f17a98ced38834c2df110f45eee0 Mon Sep 17 00:00:00 2001 From: Henry Huang Date: Thu, 4 Jun 2015 06:16:44 -0400 Subject: [PATCH 203/956] check the region before doing the AMI copy to fix [GH-2123] --- builder/amazon/common/step_ami_region_copy.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 591dcbdf0..70af06aa5 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -32,6 +32,12 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { var wg sync.WaitGroup errs := new(packer.MultiError) for _, region := range s.Regions { + + if region == ec2conn.Config.Region { + ui.Message(fmt.Sprintf("Skipping copy to %s: AMI is already in that region", region)) + continue + } + wg.Add(1) ui.Message(fmt.Sprintf("Copying to: %s", region)) From 793698f8a7e398c3177d3d031bcd456556b89a6b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 4 Jun 2015 16:25:14 +0200 Subject: [PATCH 204/956] command/build: skip nil builds [GH-2163] --- command/build.go | 1 + 1 file changed, 1 insertion(+) diff --git a/command/build.go b/command/build.go index fbadd6dab..7a035a883 100644 --- a/command/build.go +++ b/command/build.go @@ -59,6 +59,7 @@ func (c BuildCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf( "Failed to initialize build '%s': %s", n, err)) + continue } builds = append(builds, b) From fab9ca9cdbbefd236c10c351d57cba940fcc66b0 Mon Sep 17 00:00:00 2001 From: Eric Richardson Date: Tue, 4 Mar 2014 13:23:07 -0500 Subject: [PATCH 205/956] Initial work to implement additional disk support in the vmware-iso builder * Matches the syntax from mitchellh/packer#703 * Creates disk(s), adds them to the vmx template, and runs compact at the end ---
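For context on how the new option is consumed, a template opts into the extra drives with the `additionaldisk_size` key added in this patch, roughly as in the minimal sketch below. The ISO settings are placeholder values for illustration, not part of the change; per the naming scheme in `step_create_disk.go`, the two entries here would produce `<vmdk_name>-1.vmdk` and `<vmdk_name>-2.vmdk` alongside the primary disk.

```json
{
  "builders": [{
    "type": "vmware-iso",
    "iso_url": "http://example.com/install.iso",
    "iso_checksum_type": "none",
    "disk_size": 40000,
    "additionaldisk_size": [10000, 20000]
  }]
}
```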
builder/vmware/common/step_compact_disk.go | 12 +++++++++ builder/vmware/iso/builder.go | 1 + builder/vmware/iso/step_create_disk.go | 22 ++++++++++++++++ builder/vmware/iso/step_create_vmx.go | 30 ++++++++++++++++++++++ 4 files changed, 65 insertions(+) diff --git a/builder/vmware/common/step_compact_disk.go b/builder/vmware/common/step_compact_disk.go index 777e8aaea..a76cf4d7f 100644 --- a/builder/vmware/common/step_compact_disk.go +++ b/builder/vmware/common/step_compact_disk.go @@ -36,6 +36,18 @@ func (s StepCompactDisk) Run(state multistep.StateBag) multistep.StepAction { state.Put("error", fmt.Errorf("Error compacting disk: %s", err)) return multistep.ActionHalt } + + moreDisks := state.Get("additional_disk_paths").([]string) + if len(moreDisks) > 0 { + for i, path := range moreDisks { + ui.Say(fmt.Sprintf("Compacting additional disk image %d",i+1)) + if err := driver.CompactDisk(path); err != nil { + state.Put("error", fmt.Errorf("Error compacting additional disk %d: %s", i+1, err)) + return multistep.ActionHalt + } + } + + } return multistep.ActionContinue } diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index c63bfdc8b..065d20603 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -35,6 +35,7 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` + AdditionalDiskSize []uint `mapstructure:"additionaldisk_size"` DiskName string `mapstructure:"vmdk_name"` DiskSize uint `mapstructure:"disk_size"` DiskTypeId string `mapstructure:"disk_type_id"` diff --git a/builder/vmware/iso/step_create_disk.go b/builder/vmware/iso/step_create_disk.go index b357cad01..b5a1bfd87 100644 --- a/builder/vmware/iso/step_create_disk.go +++ b/builder/vmware/iso/step_create_disk.go @@ -34,6 +34,28 @@ func (stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { } state.Put("full_disk_path", full_disk_path) + + if len(config.AdditionalDiskSize) > 0 { + // stash the disk paths we create + additional_paths := make([]string,len(config.AdditionalDiskSize)) + + ui.Say("Creating additional hard drives...") + for i, additionalsize := range config.AdditionalDiskSize { + additionalpath := filepath.Join(config.OutputDir, fmt.Sprintf("%s-%d.vmdk",config.DiskName,i+1)) + size := fmt.Sprintf("%dM", uint64(additionalsize)) + + if err := driver.CreateDisk(additionalpath, size, config.DiskTypeId); err != nil { + err := fmt.Errorf("Error creating additional disk: %s", err) + state.Put("error",err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + additional_paths[i] = additionalpath + } + + state.Put("additional_disk_paths", additional_paths) + } return multistep.ActionContinue } diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 5a1a829b5..dde3859a0 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -20,6 +20,11 @@ type vmxTemplateData struct { Version string } +type additionalDiskTemplateData struct { + DiskNumber int + DiskName string +} + // This step creates the VMX file for the VM. 
// // Uses: @@ -70,6 +75,25 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { vmxTemplate = string(rawBytes) } + + if len(config.AdditionalDiskSize) > 0 { + for i, _ := range config.AdditionalDiskSize { + data := &additionalDiskTemplateData{ + DiskNumber: i+1, + DiskName: config.DiskName, + } + + diskTemplate, err := config.tpl.Process(DefaultAdditionalDiskTemplate,data) + if err != nil { + err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + vmxTemplate += diskTemplate + } + } vmxContents, err := interpolate.Render(vmxTemplate, &ctx) if err != nil { @@ -191,3 +215,9 @@ vmci0.pciSlotNumber = "35" vmci0.present = "TRUE" vmotion.checkpointFBSize = "65536000" ` + +const DefaultAdditionalDiskTemplate = ` +scsi0:{{ .DiskNumber }}.fileName = "{{ .DiskName}}-{{ .DiskNumber }}.vmdk" +scsi0:{{ .DiskNumber }}.present = "TRUE" +scsi0:{{ .DiskNumber }}.redo = "" +` From 7dfb837ddb91fa0cd8562b5d16bd84a29c5b2ad7 Mon Sep 17 00:00:00 2001 From: Eric Richardson Date: Tue, 4 Mar 2014 15:00:24 -0500 Subject: [PATCH 206/956] Formatting cleanups from `go fmt` --- builder/vmware/iso/step_create_disk.go | 16 ++++++++-------- builder/vmware/iso/step_create_vmx.go | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/builder/vmware/iso/step_create_disk.go b/builder/vmware/iso/step_create_disk.go index b5a1bfd87..ded3a86da 100644 --- a/builder/vmware/iso/step_create_disk.go +++ b/builder/vmware/iso/step_create_disk.go @@ -34,26 +34,26 @@ func (stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { } state.Put("full_disk_path", full_disk_path) - + if len(config.AdditionalDiskSize) > 0 { // stash the disk paths we create - additional_paths := make([]string,len(config.AdditionalDiskSize)) - + additional_paths := make([]string, len(config.AdditionalDiskSize)) + ui.Say("Creating additional hard drives...") for i, additionalsize := range config.AdditionalDiskSize { - additionalpath := filepath.Join(config.OutputDir, fmt.Sprintf("%s-%d.vmdk",config.DiskName,i+1)) + additionalpath := filepath.Join(config.OutputDir, fmt.Sprintf("%s-%d.vmdk", config.DiskName, i+1)) size := fmt.Sprintf("%dM", uint64(additionalsize)) - + if err := driver.CreateDisk(additionalpath, size, config.DiskTypeId); err != nil { err := fmt.Errorf("Error creating additional disk: %s", err) - state.Put("error",err) + state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - + additional_paths[i] = additionalpath } - + state.Put("additional_disk_paths", additional_paths) } diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index dde3859a0..97cd1262c 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -21,8 +21,8 @@ type vmxTemplateData struct { } type additionalDiskTemplateData struct { - DiskNumber int - DiskName string + DiskNumber int + DiskName string } // This step creates the VMX file for the VM. 
@@ -75,22 +75,22 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { vmxTemplate = string(rawBytes) } - + if len(config.AdditionalDiskSize) > 0 { for i, _ := range config.AdditionalDiskSize { data := &additionalDiskTemplateData{ - DiskNumber: i+1, - DiskName: config.DiskName, + DiskNumber: i + 1, + DiskName: config.DiskName, } - - diskTemplate, err := config.tpl.Process(DefaultAdditionalDiskTemplate,data) + + diskTemplate, err := config.tpl.Process(DefaultAdditionalDiskTemplate, data) if err != nil { err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - + vmxTemplate += diskTemplate } } From 5f183026b337d8b64f5a50a20931de457a8ee023 Mon Sep 17 00:00:00 2001 From: Eric Richardson Date: Tue, 4 Mar 2014 15:48:15 -0500 Subject: [PATCH 207/956] Fix vmware compact_disk step when there are no additional disks --- builder/vmware/common/step_compact_disk.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/builder/vmware/common/step_compact_disk.go b/builder/vmware/common/step_compact_disk.go index a76cf4d7f..4319b7fe9 100644 --- a/builder/vmware/common/step_compact_disk.go +++ b/builder/vmware/common/step_compact_disk.go @@ -37,16 +37,16 @@ func (s StepCompactDisk) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - moreDisks := state.Get("additional_disk_paths").([]string) - if len(moreDisks) > 0 { - for i, path := range moreDisks { - ui.Say(fmt.Sprintf("Compacting additional disk image %d",i+1)) - if err := driver.CompactDisk(path); err != nil { - state.Put("error", fmt.Errorf("Error compacting additional disk %d: %s", i+1, err)) - return multistep.ActionHalt + if state.Get("additional_disk_paths") != nil { + if moreDisks := state.Get("additional_disk_paths").([]string); len(moreDisks) > 0 { + for i, path := range moreDisks { + ui.Say(fmt.Sprintf("Compacting additional disk image %d",i+1)) + if err := driver.CompactDisk(path); err != nil { + state.Put("error", fmt.Errorf("Error compacting additional disk %d: %s", i+1, err)) + return multistep.ActionHalt + } } } - } return multistep.ActionContinue From b665339b39f007a2e3121a99eae959487b4208b1 Mon Sep 17 00:00:00 2001 From: Shawn Neal Date: Mon, 15 Sep 2014 09:47:26 -0700 Subject: [PATCH 208/956] Added website docs for VMWare ISO additionaldisk_size --- website/source/docs/builders/vmware-iso.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index b1d026e44..b021ca532 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -72,6 +72,12 @@ each category, the available options are alphabetized and described. ### Optional: +* `additionaldisk_size` (array of integers) - The size(s) of any additional + hard disks for the VM in megabytes. If this is not specified then the VM will + only contain a primary hard disk. The builder uses expandable, not fixed-size + virtual hard disks, so the actual file representing the disk will not use the + full size unless it is full. + * `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system installer. 
Special From e9a491ae45871c42bf9b86c302b6ed26657b8acf Mon Sep 17 00:00:00 2001 From: Shawn Neal Date: Thu, 4 Jun 2015 11:44:07 -0700 Subject: [PATCH 209/956] New interpolation for additional vmware disks --- builder/vmware/iso/step_create_vmx.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go index 97cd1262c..69cb3f261 100644 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -45,15 +45,6 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Building and writing VMX file") - ctx := config.ctx - ctx.Data = &vmxTemplateData{ - Name: config.VMName, - GuestOS: config.GuestOSType, - DiskName: config.DiskName, - Version: config.Version, - ISOPath: isoPath, - } - vmxTemplate := DefaultVMXTemplate if config.VMXTemplatePath != "" { f, err := os.Open(config.VMXTemplatePath) @@ -76,14 +67,16 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { vmxTemplate = string(rawBytes) } + ctx := config.ctx + if len(config.AdditionalDiskSize) > 0 { for i, _ := range config.AdditionalDiskSize { - data := &additionalDiskTemplateData{ + ctx.Data = &additionalDiskTemplateData{ DiskNumber: i + 1, DiskName: config.DiskName, } - diskTemplate, err := config.tpl.Process(DefaultAdditionalDiskTemplate, data) + diskTemplate, err := interpolate.Render(DefaultAdditionalDiskTemplate, &ctx) if err != nil { err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err) state.Put("error", err) @@ -95,6 +88,14 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { vmxTemplate += diskTemplate } } + ctx.Data = &vmxTemplateData{ + Name: config.VMName, + GuestOS: config.GuestOSType, + DiskName: config.DiskName, + Version: config.Version, + ISOPath: isoPath, + } + vmxContents, err := interpolate.Render(vmxTemplate, &ctx) if err != nil { err := fmt.Errorf("Error processing VMX template: %s", err) From a5818b158f9ca52ff06f500e4bdc2e4c2379bd72 Mon Sep 17 00:00:00 2001 From: "James G. Kim" Date: Tue, 2 Jun 2015 18:38:27 +0900 Subject: [PATCH 210/956] Fixes 'unknown configuration key' errors for 'only' and 'except' --- template/parse.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/template/parse.go b/template/parse.go index a7b187f37..dbb29569d 100644 --- a/template/parse.go +++ b/template/parse.go @@ -134,6 +134,8 @@ func (r *rawTemplate) Template() (*Template, error) { } // Set the configuration + delete(c, "except") + delete(c, "only") delete(c, "keep_input_artifact") delete(c, "type") if len(c) > 0 { From d7c77895dcf0b415bb7018b0e5850603cf8879f7 Mon Sep 17 00:00:00 2001 From: "James G.
Kim" Date: Fri, 5 Jun 2015 12:26:33 +0900 Subject: [PATCH 211/956] Add tests for only and except of post-processors --- template/parse_test.go | 34 +++++++++++++++++++++ template/test-fixtures/parse-pp-except.json | 8 +++++ template/test-fixtures/parse-pp-only.json | 8 +++++ 3 files changed, 50 insertions(+) create mode 100644 template/test-fixtures/parse-pp-except.json create mode 100644 template/test-fixtures/parse-pp-only.json diff --git a/template/parse_test.go b/template/parse_test.go index 46bb75ad8..9abca2f77 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -174,6 +174,40 @@ func TestParse(t *testing.T) { false, }, + { + "parse-pp-only.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + OnlyExcept: OnlyExcept{ + Only: []string{"bar"}, + }, + }, + }, + }, + }, + false, + }, + + { + "parse-pp-except.json", + &Template{ + PostProcessors: [][]*PostProcessor{ + []*PostProcessor{ + &PostProcessor{ + Type: "foo", + OnlyExcept: OnlyExcept{ + Except: []string{"bar"}, + }, + }, + }, + }, + }, + false, + }, + { "parse-pp-string.json", &Template{ diff --git a/template/test-fixtures/parse-pp-except.json b/template/test-fixtures/parse-pp-except.json new file mode 100644 index 000000000..dea70d3d3 --- /dev/null +++ b/template/test-fixtures/parse-pp-except.json @@ -0,0 +1,8 @@ +{ + "post-processors": [ + { + "type": "foo", + "except": ["bar"] + } + ] +} diff --git a/template/test-fixtures/parse-pp-only.json b/template/test-fixtures/parse-pp-only.json new file mode 100644 index 000000000..d2dbe07c6 --- /dev/null +++ b/template/test-fixtures/parse-pp-only.json @@ -0,0 +1,8 @@ +{ + "post-processors": [ + { + "type": "foo", + "only": ["bar"] + } + ] +} From 052b5e66996003f55e4513befafd3c7934fc7802 Mon Sep 17 00:00:00 2001 From: Henry Huang Date: Fri, 5 Jun 2015 11:15:48 +0000 Subject: [PATCH 212/956] "Name" parameter required for copying ami across regions [GH-2172] --- builder/amazon/chroot/builder.go | 4 +++- builder/amazon/common/step_ami_region_copy.go | 6 ++++-- builder/amazon/ebs/builder.go | 1 + builder/amazon/instance/builder.go | 1 + 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 66650b01e..9e7452182 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -166,7 +166,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepSnapshot{}, &StepRegisterAMI{}, &awscommon.StepAMIRegionCopy{ - Regions: b.config.AMIRegions, + AccessConfig: &b.config.AccessConfig, + Regions: b.config.AMIRegions, + Name: b.config.AMIName, }, &awscommon.StepModifyAMIAttributes{ Description: b.config.AMIDescription, diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 591dcbdf0..c27b33560 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -14,6 +14,7 @@ import ( type StepAMIRegionCopy struct { AccessConfig *AccessConfig Regions []string + Name string } func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { @@ -37,7 +38,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { go func(region string) { defer wg.Done() - id, err := amiRegionCopy(state, s.AccessConfig, ami, region, ec2conn.Config.Region) + id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, ec2conn.Config.Region) lock.Lock() defer 
lock.Unlock() @@ -69,7 +70,7 @@ func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) { // amiRegionCopy does a copy for the given AMI to the target region and // returns the resulting ID or error. -func amiRegionCopy(state multistep.StateBag, config *AccessConfig, imageId string, +func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, imageId string, target string, source string) (string, error) { // Connect to the region where the AMI will be copied to @@ -83,6 +84,7 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, imageId strin resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ SourceRegion: &source, SourceImageID: &imageId, + Name: &name, }) if err != nil { diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 274e3cd6d..5df69f658 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -123,6 +123,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, Regions: b.config.AMIRegions, + Name: b.config.AMIName, }, &awscommon.StepModifyAMIAttributes{ Description: b.config.AMIDescription, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 4808a4031..8534cf8dc 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -205,6 +205,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, Regions: b.config.AMIRegions, + Name: b.config.AMIName, }, &awscommon.StepModifyAMIAttributes{ Description: b.config.AMIDescription, From 5bd2d4e6e06db97377c4ef75005a0eed6698dba4 Mon Sep 17 00:00:00 2001 From: "James G. 
Kim" Date: Fri, 5 Jun 2015 22:01:29 +0900 Subject: [PATCH 213/956] Fix a problem preventing `tag` or `save` from docker-tag artifacts --- post-processor/docker-save/post-processor.go | 4 +++- post-processor/docker-tag/post-processor.go | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/post-processor/docker-save/post-processor.go b/post-processor/docker-save/post-processor.go index f35b0053e..ab6170802 100644 --- a/post-processor/docker-save/post-processor.go +++ b/post-processor/docker-save/post-processor.go @@ -9,6 +9,7 @@ import ( "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/post-processor/docker-import" + "github.com/mitchellh/packer/post-processor/docker-tag" "github.com/mitchellh/packer/template/interpolate" ) @@ -44,7 +45,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - if artifact.BuilderId() != dockerimport.BuilderId { + if artifact.BuilderId() != dockerimport.BuilderId && + artifact.BuilderId() != dockertag.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only save Docker builder artifacts.", artifact.BuilderId()) diff --git a/post-processor/docker-tag/post-processor.go b/post-processor/docker-tag/post-processor.go index 7fc47901f..a531f2820 100644 --- a/post-processor/docker-tag/post-processor.go +++ b/post-processor/docker-tag/post-processor.go @@ -45,7 +45,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - if artifact.BuilderId() != dockerimport.BuilderId { + if artifact.BuilderId() != BuilderId && + artifact.BuilderId() != dockerimport.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only tag from Docker builder artifacts.", artifact.BuilderId()) From 3dabf60b52eba1286f1ecb0f1b2f093fc4c8abaa Mon Sep 17 00:00:00 2001 From: Basil Peace Date: Sat, 6 Jun 2015 23:43:06 +0300 Subject: [PATCH 214/956] Fix error in isotime demo in the documentation of Configuration Templates --- .../source/docs/templates/configuration-templates.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index f940ebcc1..514bf7820 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -118,7 +118,7 @@ isotime = June 7, 7:22:43pm 2014 {{isotime "2006-01-02"}} = 2014-06-07 {{isotime "Mon 1504"}} = Sat 1922 -{{isotime "01-Jan-06 03\_04\_05"}} = 07-Jun-2014 07\_22\_43 +{{isotime "02-Jan-06 03\_04\_05"}} = 07-Jun-2014 07\_22\_43 {{isotime "Hour15Year200603"}} = Hour19Year201407 ``` From 1539b9459f69adc2fcb86cbb5c9dac347d0faa4a Mon Sep 17 00:00:00 2001 From: Christoph Hartmann Date: Sun, 7 Jun 2015 15:12:05 +0200 Subject: [PATCH 215/956] replace opscode with chef --- .../source/docs/provisioners/chef-solo.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown index e4f27f627..178ba0c62 100644 --- a/website/source/docs/provisioners/chef-solo.html.markdown +++ b/website/source/docs/provisioners/chef-solo.html.markdown @@ -10,12 +10,12 @@ 
description: |- Type: `chef-solo` The Chef solo Packer provisioner installs and configures software on machines built -by Packer using [chef-solo](http://docs.opscode.com/chef_solo.html). Cookbooks +by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). Cookbooks can be uploaded from your local machine to the remote machine or remote paths can be used. The provisioner will even install Chef onto your machine if it isn't already -installed, using the official Chef installers provided by Opscode. +installed, using the official Chef installers provided by Chef Inc. ## Basic Example @@ -82,11 +82,11 @@ configuration is actually required, but at least `run_list` is recommended. These will be uploaded to the remote machine in the directory specified by the `staging_directory`. By default, this is empty. -* `run_list` (array of strings) - The [run list](http://docs.opscode.com/essentials_node_object_run_lists.html) +* `run_list` (array of strings) - The [run list](https://docs.chef.io/run_lists.html) for Chef. By default this is empty. * `skip_install` (boolean) - If true, Chef will not automatically be installed - on the machine using the Opscode omnibus installers. + on the machine using the Chef omnibus installers. * `staging_directory` (string) - This is the directory where all the configuration of Chef by Packer will be placed. By default this is "/tmp/packer-chef-solo". @@ -149,7 +149,7 @@ for readability) to install Chef. This command can be customized if you want to install Chef in another way. ```text -curl -L https://www.opscode.com/chef/install.sh | \ +curl -L https://www.chef.io/chef/install.sh | \ {{if .Sudo}}sudo{{end}} bash ``` From 3d94462e37a2dace1600f64689142f0eaab90aeb Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Sun, 17 May 2015 17:35:39 +0300 Subject: [PATCH 216/956] remove image format from image name Signed-off-by: Vasiliy Tolstov --- builder/qemu/step_copy_disk.go | 7 +++---- builder/qemu/step_create_disk.go | 6 +++--- builder/qemu/step_resize_disk.go | 7 +++---- builder/qemu/step_run.go | 3 +-- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/builder/qemu/step_copy_disk.go b/builder/qemu/step_copy_disk.go index 54c3084ac..dd5b6d8d7 100644 --- a/builder/qemu/step_copy_disk.go +++ b/builder/qemu/step_copy_disk.go @@ -3,7 +3,6 @@ package qemu import ( "fmt" "path/filepath" - "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -18,13 +17,13 @@ func (s *stepCopyDisk) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) isoPath := state.Get("iso_path").(string) ui := state.Get("ui").(packer.Ui) - path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, - strings.ToLower(config.Format))) - name := config.VMName + "." 
+ strings.ToLower(config.Format) + path := filepath.Join(config.OutputDir, fmt.Sprintf("%s", config.VMName)) + name := config.VMName command := []string{ "convert", "-f", config.Format, + "-O", config.Format, isoPath, path, } diff --git a/builder/qemu/step_create_disk.go b/builder/qemu/step_create_disk.go index 986df1d2b..ed757ca8c 100644 --- a/builder/qemu/step_create_disk.go +++ b/builder/qemu/step_create_disk.go @@ -2,10 +2,10 @@ package qemu import ( "fmt" + "path/filepath" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "path/filepath" - "strings" ) // This step creates the virtual disk that will be used as the @@ -16,7 +16,7 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction { config := state.Get("config").(*config) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) - name := config.VMName + "." + strings.ToLower(config.Format) + name := config.VMName path := filepath.Join(config.OutputDir, name) command := []string{ diff --git a/builder/qemu/step_resize_disk.go b/builder/qemu/step_resize_disk.go index 4e8536c32..6f27e6843 100644 --- a/builder/qemu/step_resize_disk.go +++ b/builder/qemu/step_resize_disk.go @@ -2,10 +2,10 @@ package qemu import ( "fmt" + "path/filepath" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "path/filepath" - "strings" ) // This step resizes the virtual disk that will be used as the @@ -16,8 +16,7 @@ func (s *stepResizeDisk) Run(state multistep.StateBag) multistep.StepAction { config := state.Get("config").(*config) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) - path := filepath.Join(config.OutputDir, fmt.Sprintf("%s.%s", config.VMName, - strings.ToLower(config.Format))) + path := filepath.Join(config.OutputDir, config.VMName) command := []string{ "resize", diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 3f900d651..8524d9f9e 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -64,8 +64,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error vnc := fmt.Sprintf("0.0.0.0:%d", vncPort-5900) vmName := config.VMName - imgPath := filepath.Join(config.OutputDir, - fmt.Sprintf("%s.%s", vmName, strings.ToLower(config.Format))) + imgPath := filepath.Join(config.OutputDir, vmName) defaultArgs := make(map[string]string) From 3427dc9ed47ad193b2e75449358699eed1587113 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 09:02:00 -0700 Subject: [PATCH 217/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d42ea0bdb..c279ebac3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,9 @@ IMPROVEMENTS: have prohibitive firewalls * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] + * post-processor/docker-save: Can be chained [GH-2179] * post-processor/docker-tag: Support `force` option [GH-2055] + * post-processor/docker-tag: Can be chained [GH-2179] BUG FIXES: From 769b7d20b9271f03a4d5122657925e7000fdc4de Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 09:16:01 -0700 Subject: [PATCH 218/956] amazon/ebs: acceptance test for region copy --- builder/amazon/ebs/builder_acc_test.go | 70 ++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index 19af43512..b70f7f7b9 100644 
--- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -1,10 +1,14 @@ package ebs import ( + "fmt" "os" "testing" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/mitchellh/packer/builder/amazon/common" builderT "github.com/mitchellh/packer/helper/builder/testing" + "github.com/mitchellh/packer/packer" ) func TestBuilderAcc_basic(t *testing.T) { @@ -15,6 +19,48 @@ func TestBuilderAcc_basic(t *testing.T) { }) } +func TestBuilderAcc_regionCopy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: testBuilderAccRegionCopy, + Check: checkRegionCopy([]string{"us-east-1", "us-west-2"}), + }) +} + +func checkRegionCopy(regions []string) builderT.TestCheckFunc { + return func(artifacts []packer.Artifact) error { + if len(artifacts) > 1 { + return fmt.Errorf("more than 1 artifact") + } + + // Get the actual *Artifact pointer so we can access the AMIs directly + artifactRaw := artifacts[0] + artifact, ok := artifactRaw.(*common.Artifact) + if !ok { + return fmt.Errorf("unknown artifact: %#v", artifactRaw) + } + + // Verify that we copied to only the regions given + regionSet := make(map[string]struct{}) + for _, r := range regions { + regionSet[r] = struct{}{} + } + for r, _ := range artifact.Amis { + if _, ok := regionSet[r]; !ok { + return fmt.Errorf("unknown region: %s", r) + } + + delete(regionSet, r) + } + if len(regionSet) > 0 { + return fmt.Errorf("didn't copy to: %#v", regionSet) + } + + return nil + } +} + func testAccPreCheck(t *testing.T) { if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" { t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests") @@ -25,6 +71,16 @@ func testAccPreCheck(t *testing.T) { } } +func testEC2Conn() (*ec2.EC2, error) { + access := &common.AccessConfig{RawRegion: "us-east-1"} + config, err := access.Config() + if err != nil { + return nil, err + } + + return ec2.New(config), nil +} + const testBuilderAccBasic = ` { "builders": [{ @@ -37,3 +93,17 @@ const testBuilderAccBasic = ` }] } ` + +const testBuilderAccRegionCopy = ` +{ + "builders": [{ + "type": "test", + "region": "us-east-1", + "instance_type": "m3.medium", + "source_ami": "ami-76b2a71e", + "ssh_username": "ubuntu", + "ami_name": "packer-test {{timestamp}}", + "ami_regions": ["us-east-1", "us-west-2"] + }] +} +` From 8aec42e3634e23a40a4dcd5aa9fd1e03ff745754 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 09:25:56 -0700 Subject: [PATCH 219/956] update CHANGELOG --- CHANGELOG.md | 1 + builder/amazon/common/step_ami_region_copy.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c279ebac3..ae5d76330 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ BUG FIXES: AWS for spot instance. [GH-2017] * builder/amazon: Private key file (only available in debug mode) is deleted on cleanup. [GH-1801] + * builder/amazon: AMI copy won't copy to the source region [GH-2123] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. 
[GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index de641c902..0f0c49fdb 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -33,9 +33,9 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { var wg sync.WaitGroup errs := new(packer.MultiError) for _, region := range s.Regions { - if region == ec2conn.Config.Region { - ui.Message(fmt.Sprintf("Avoid copying AMI (%s) to %s", ec2conn.Config.Region, region)) + if region == ec2conn.Config.Region { + ui.Message(fmt.Sprintf( + "Avoid copying AMI to duplicate region %s", region)) continue } From 984bbc3a1ee8e377e48a90424af140b39a26e180 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 09:34:53 -0700 Subject: [PATCH 220/956] amazon/common: wording nit --- builder/amazon/common/step_ami_region_copy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 0f0c49fdb..3f545284f 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -35,7 +35,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { for _, region := range s.Regions { if region == ec2conn.Config.Region { ui.Message(fmt.Sprintf( - "Avoid copying AMI to duplicate region %s", region)) + "Avoiding copying AMI to duplicate region %s", region)) continue } From 231f01cd352c7fc93fa5091b7840d12ebe1b3b89 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 8 Jun 2015 17:08:39 -0500 Subject: [PATCH 221/956] builder/aws: Add pre-validate step to validate things before building.
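(Editor's note: this step leans on the fail-fast behavior of github.com/mitchellh/multistep, where returning ActionHalt from any step's Run stops the pipeline before later, more expensive steps execute. As a minimal illustrative sketch only -- assuming the 2015-era multistep API used throughout this series, and a hypothetical step named failFastStep -- the pattern looks like this:

package main

import (
	"fmt"

	"github.com/mitchellh/multistep"
)

// failFastStep stands in for a validation step such as StepPreValidate:
// it records an error in the state bag and halts, so no later step runs.
type failFastStep struct{}

func (s *failFastStep) Run(state multistep.StateBag) multistep.StepAction {
	state.Put("error", fmt.Errorf("name conflicts with an existing AMI"))
	return multistep.ActionHalt
}

func (s *failFastStep) Cleanup(multistep.StateBag) {}

func main() {
	state := new(multistep.BasicStateBag)
	runner := &multistep.BasicRunner{Steps: []multistep.Step{&failFastStep{}}}
	runner.Run(state)
	if err, ok := state.GetOk("error"); ok {
		fmt.Println("halted before any expensive work:", err)
	}
}

Putting such a step first in a builder's step list is what makes the validation "pre": a name collision is reported in seconds rather than after an instance has been launched and provisioned.)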
--- builder/amazon/common/step_pre_validate.go | 47 ++++++++++++++++++++++ builder/amazon/ebs/builder.go | 3 ++ 2 files changed, 50 insertions(+) create mode 100644 builder/amazon/common/step_pre_validate.go diff --git a/builder/amazon/common/step_pre_validate.go b/builder/amazon/common/step_pre_validate.go new file mode 100644 index 000000000..1fc3dbade --- /dev/null +++ b/builder/amazon/common/step_pre_validate.go @@ -0,0 +1,47 @@ +package common + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" +) + +// StepPreValidate provides an opportunity to pre-validate any configuration for +// the build before actually doing any time consuming work +// +type StepPreValidate struct { + DestAmiName string +} + +func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction { + ec2conn := state.Get("ec2").(*ec2.EC2) + ui := state.Get("ui").(packer.Ui) + + ui.Say("Prevalidating AMI Name...") + resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + Filters: []*ec2.Filter{&ec2.Filter{ + Name: aws.String("name"), + Values: []*string{aws.String(s.DestAmiName)}, + }}}) + + if err != nil { + err := fmt.Errorf("Error querying AMI: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if len(resp.Images) > 0 { + err := fmt.Errorf("Error: an AMI with that name already exists") + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *StepPreValidate) Cleanup(multistep.StateBag) {} diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 274e3cd6d..93bb23e54 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -78,6 +78,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ + &awscommon.StepPreValidate{ + DestAmiName: b.config.AMIName, + }, &awscommon.StepSourceAMIInfo{ SourceAmi: b.config.SourceAmi, EnhancedNetworking: b.config.AMIEnhancedNetworking, From 68040f786c314ef9fc8a839f681f3c2220d56c55 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 8 Jun 2015 22:00:59 -0500 Subject: [PATCH 222/956] show AMI id in error message --- builder/amazon/common/step_pre_validate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_pre_validate.go b/builder/amazon/common/step_pre_validate.go index 1fc3dbade..5eb263eca 100644 --- a/builder/amazon/common/step_pre_validate.go +++ b/builder/amazon/common/step_pre_validate.go @@ -35,7 +35,7 @@ func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction { } if len(resp.Images) > 0 { - err := fmt.Errorf("Error: an AMI with that name already exists") + err := fmt.Errorf("Error: name conflicts with an existing AMI: %s", *resp.Images[0].ImageID) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt From 0885e03bbf48e5bff2378d2ccb18c99db5a99df0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:24:49 -0700 Subject: [PATCH 223/956] virtualbox/iso: acceptance test --- builder/virtualbox/iso/builder_acc_test.go | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 builder/virtualbox/iso/builder_acc_test.go diff --git a/builder/virtualbox/iso/builder_acc_test.go b/builder/virtualbox/iso/builder_acc_test.go new file mode 100644 index 000000000..d756f452e --- 
/dev/null +++ b/builder/virtualbox/iso/builder_acc_test.go @@ -0,0 +1,30 @@ +package iso + +import ( + "testing" + + builderT "github.com/mitchellh/packer/helper/builder/testing" +) + +func TestBuilderAcc_basic(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: testBuilderAccBasic, + }) +} + +const testBuilderAccBasic = ` +{ + "builders": [{ + "type": "test", + "guest_os_type": "Ubuntu_64", + "iso_url": "http://releases.ubuntu.com/12.04/ubuntu-12.04.5-server-amd64.iso", + "iso_checksum": "769474248a3897f4865817446f9a4a53", + "iso_checksum_type": "md5", + "ssh_username": "packer", + "ssh_password": "packer", + "ssh_wait_timeout": "30s", + "shutdown_command": "echo 'packer' | sudo -S shutdown -P now" + }] +} +` From b441348ba4ad50364be33ce73c163d140730d63a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:25:21 -0700 Subject: [PATCH 224/956] virtualbox/common: remove devices should delete floppy controller GH-1879 --- builder/virtualbox/common/step_remove_devices.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/builder/virtualbox/common/step_remove_devices.go b/builder/virtualbox/common/step_remove_devices.go index 8f06b1a61..c184c7c4f 100644 --- a/builder/virtualbox/common/step_remove_devices.go +++ b/builder/virtualbox/common/step_remove_devices.go @@ -38,6 +38,19 @@ func (s *StepRemoveDevices) Run(state multistep.StateBag) multistep.StepAction { ui.Error(err.Error()) return multistep.ActionHalt } + + // Don't forget to remove the floppy controller as well + command = []string{ + "storagectl", vmName, + "--name", "Floppy Controller", + "--remove", + } + if err := driver.VBoxManage(command...); err != nil { + err := fmt.Errorf("Error removing floppy controller: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } } if _, ok := state.GetOk("attachedIso"); ok { From 35246ba98632751f6a60217d508684cf41e8b332 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:31:53 -0700 Subject: [PATCH 225/956] virtualbox/common: fix test --- builder/virtualbox/common/step_remove_devices_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/builder/virtualbox/common/step_remove_devices_test.go b/builder/virtualbox/common/step_remove_devices_test.go index e3df5e6e7..6e6e28f79 100644 --- a/builder/virtualbox/common/step_remove_devices_test.go +++ b/builder/virtualbox/common/step_remove_devices_test.go @@ -102,10 +102,13 @@ func TestStepRemoveDevices_floppyPath(t *testing.T) { } // Test that both were removed - if len(driver.VBoxManageCalls) != 1 { + if len(driver.VBoxManageCalls) != 2 { t.Fatalf("bad: %#v", driver.VBoxManageCalls) } if driver.VBoxManageCalls[0][3] != "Floppy Controller" { t.Fatalf("bad: %#v", driver.VBoxManageCalls) } + if driver.VBoxManageCalls[1][3] != "Floppy Controller" { + t.Fatalf("bad: %#v", driver.VBoxManageCalls) + } } From e65e2d104afa8ed5e1846579c9bede7676d5518b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:41:39 -0700 Subject: [PATCH 226/956] common: StepDownload can force an extension --- common/step_download.go | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/common/step_download.go b/common/step_download.go index 8d6378adc..b8bd60b5e 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -1,6 +1,7 @@ package common import ( + "crypto/sha1" "encoding/hex" "fmt" "log" @@ -36,6 +37,12 @@ type StepDownload struct { // A list of URLs to 
attempt to download this thing. Url []string + + // Extension is the extension to force for the file that is downloaded. + // Some systems require a certain extension. If this isn't set, the + // extension on the URL is used. Otherwise, this will be forced + // on the downloaded file for every URL. + Extension string } func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { @@ -60,9 +67,19 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { targetPath := s.TargetPath if targetPath == "" { + // Determine a cache key. This is normally just the URL but + // if we force a certain extension we hash the URL and add + // the extension to force it. + cacheKey := url + if s.Extension != "" { + hash := sha1.Sum([]byte(url)) + cacheKey = fmt.Sprintf( + "%s.%s", hex.EncodeToString(hash[:]), s.Extension) + } + log.Printf("Acquiring lock to download: %s", url) - targetPath = cache.Lock(url) - defer cache.Unlock(url) + targetPath = cache.Lock(cacheKey) + defer cache.Unlock(cacheKey) } config := &DownloadConfig{ From 9ea34d4ea85d469272a12dc0ab6fe6837128e382 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:42:16 -0700 Subject: [PATCH 227/956] virtualbox/iso: force iso extension for downloads --- builder/virtualbox/iso/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index d095eae2d..b6ca982fd 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -230,6 +230,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Description: "ISO", ResultKey: "iso_path", Url: b.config.ISOUrls, + Extension: "iso", }, &vboxcommon.StepOutputDir{ Force: b.config.PackerForce, From 9f0b8b71dbb14dd4b53ec567ffa59abc470cee73 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:47:47 -0700 Subject: [PATCH 228/956] virtualbox,vmware: http server should listen on IPv4 --- builder/virtualbox/common/step_http_server.go | 2 +- builder/vmware/common/step_http_server.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/virtualbox/common/step_http_server.go b/builder/virtualbox/common/step_http_server.go index 440d9adbc..55874992e 100644 --- a/builder/virtualbox/common/step_http_server.go +++ b/builder/virtualbox/common/step_http_server.go @@ -49,7 +49,7 @@ func (s *StepHTTPServer) Run(state multistep.StateBag) multistep.StepAction { } httpPort = offset + s.HTTPPortMin - httpAddr = fmt.Sprintf(":%d", httpPort) + httpAddr = fmt.Sprintf("0.0.0.0:%d", httpPort) log.Printf("Trying port: %d", httpPort) s.l, err = net.Listen("tcp", httpAddr) if err == nil { diff --git a/builder/vmware/common/step_http_server.go b/builder/vmware/common/step_http_server.go index 440d9adbc..55874992e 100644 --- a/builder/vmware/common/step_http_server.go +++ b/builder/vmware/common/step_http_server.go @@ -49,7 +49,7 @@ func (s *StepHTTPServer) Run(state multistep.StateBag) multistep.StepAction { } httpPort = offset + s.HTTPPortMin - httpAddr = fmt.Sprintf(":%d", httpPort) + httpAddr = fmt.Sprintf("0.0.0.0:%d", httpPort) log.Printf("Trying port: %d", httpPort) s.l, err = net.Listen("tcp", httpAddr) if err == nil { From 646edf5ae238ee82f1af35a882b9fea8c411f786 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 20:56:55 -0700 Subject: [PATCH 229/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
ae5d76330..e02523426 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ BUG FIXES: * builder/amazon: Private key file (only available in debug mode) is deleted on cleanup. [GH-1801] * builder/amazon: AMI copy won't copy to the source region [GH-2123] + * builder/amazon: Validate AMI doesn't exist with name prior to build [GH-1774] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API From 9dff0adfb16459842d6fe2dd30198465be2981f8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:13:25 -0700 Subject: [PATCH 230/956] builder/google: don't hardcode SSH timeout [GH-1781] --- builder/googlecompute/builder.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/googlecompute/builder.go b/builder/googlecompute/builder.go index cfb9c6e56..8e35598d2 100644 --- a/builder/googlecompute/builder.go +++ b/builder/googlecompute/builder.go @@ -4,11 +4,11 @@ package googlecompute import ( "fmt" + "log" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/packer" - "log" - "time" ) // The unique ID for this builder. @@ -63,7 +63,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &common.StepConnectSSH{ SSHAddress: sshAddress, SSHConfig: sshConfig, - SSHWaitTimeout: 5 * time.Minute, + SSHWaitTimeout: b.config.sshTimeout, }, new(common.StepProvision), new(StepTeardownInstance), From 325a44008f4ddb74bdbb186c9d22898369f67c13 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:16:35 -0700 Subject: [PATCH 231/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e02523426..b2a6d5443 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ BUG FIXES: running scripts [GH-1993] * builder/docker: Fix crash that could occur at certain timed ctrl-c [GH-1838] * builder/docker: validate that `export_path` is not a directory [GH-2105] + * builder/google: `ssh_timeout` is respected [GH-1781] * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Added SCSI support * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] From 673c726b3df6b2c55c8e6721595cbbd315a50ed9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:17:18 -0700 Subject: [PATCH 232/956] update CHANGELOG --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2a6d5443..bb7f5adf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ IMPROVEMENTS: RackConnect data to appear * buidler/openstakc: Add `ssh_interface` option for rackconnect for users that have prohibitive firewalls + * builder/virtualbox: Added SCSI support * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] * post-processor/docker-save: Can be chained [GH-2179] @@ -39,7 +40,8 @@ BUG FIXES: * builder/docker: validate that `export_path` is not a directory [GH-2105] * builder/google: `ssh_timeout` is respected [GH-1781] * builder/qemu: Add `disk_discard` option [GH-2120] - * builder/virtualbox: Added SCSI support + * builder/virtualbox: Remove the floppy controller in addition to the + floppy disk. 
[GH-1879] * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * command/validate: don't crash for invalid builds [GH-2139] From b8420df62252bc7e27f85e39232e13e0401fd087 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:18:51 -0700 Subject: [PATCH 233/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb7f5adf2..24195a84f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ BUG FIXES: * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API + * builder/digitalocean: Private images can be used as a source [GH-1792] * builder/docker: Fixed hang on prompt while copying script * builder/docker: Use `docker exec` for newer versions of Docker for running scripts [GH-1993] From 5701d3f6f2ada02c2a2afadca9e73b321235e5c2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:20:51 -0700 Subject: [PATCH 234/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24195a84f..4ebc6712c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ BUG FIXES: * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * command/validate: don't crash for invalid builds [GH-2139] * post-processor/atlas: Find common archive prefix for Windows [GH-1874] + * post-processor/atlas: Fix index out of range panic [GH-1959] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] * provisioner/puppet-masterless: Allow manifest_file to be a directory From 888621d0291ff31ebbf2be23ae596192ce4a7078 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:24:21 -0700 Subject: [PATCH 235/956] update CHANGELOG --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ebc6712c..7c30477e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,8 +41,12 @@ BUG FIXES: * builder/docker: validate that `export_path` is not a directory [GH-2105] * builder/google: `ssh_timeout` is respected [GH-1781] * builder/qemu: Add `disk_discard` option [GH-2120] + * builder/virtualbox: Bind HTTP server to IPv4, which is more compatible with + OS installers. [GH-1709] * builder/virtualbox: Remove the floppy controller in addition to the floppy disk. [GH-1879] + * builder/vmware: Bind HTTP server to IPv4, which is more compatible with + OS installers. 
[GH-1709] * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * command/validate: don't crash for invalid builds [GH-2139] From d18300e3d6e9d62aa932f25e09c5c1d9d68ad918 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:26:28 -0700 Subject: [PATCH 236/956] import style --- signal.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/signal.go b/signal.go index e63dd2fe5..99c9d4c73 100644 --- a/signal.go +++ b/signal.go @@ -1,11 +1,12 @@ package main import ( - "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/packer/plugin" "log" "os" "os/signal" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/packer/plugin" ) // Prepares the signal handlers so that we handle interrupts properly. From 50fef50e4be908929daf5b917e95e48119678792 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:28:36 -0700 Subject: [PATCH 237/956] add interrupt handling for SIGTERM [GH-1858] --- CHANGELOG.md | 1 + signal.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c30477e5..fb3d26c5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES: IMPROVEMENTS: + * core: Interrupt handling for SIGTERM signal as well. [GH-1858] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear * buidler/openstakc: Add `ssh_interface` option for rackconnect for users that diff --git a/signal.go b/signal.go index 99c9d4c73..8846a3451 100644 --- a/signal.go +++ b/signal.go @@ -4,6 +4,7 @@ import ( "log" "os" "os/signal" + "syscall" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer/plugin" @@ -14,6 +15,7 @@ import ( func setupSignalHandlers(ui packer.Ui) { ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) + signal.Notify(ch, syscall.SIGTERM) go func() { // First interrupt. We mostly ignore this because it allows the From fa2bcb8bc53b81ed6160cef571b103a1cab3fe38 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:33:53 -0700 Subject: [PATCH 238/956] update CHANGELOG --- CHANGELOG.md | 2 ++ builder/vmware/common/step_type_boot_command.go | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb3d26c5c..027fc6fed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,8 @@ BUG FIXES: OS installers. [GH-1709] * builder/virtualbox: Remove the floppy controller in addition to the floppy disk. [GH-1879] + * builder/vmware: Add 100ms delay between keystrokes to avoid subtle + timing issues in most cases. [GH-1663] * builder/vmware: Bind HTTP server to IPv4, which is more compatible with OS installers. [GH-1709] * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index 49c579889..b23ede1da 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -200,10 +200,13 @@ func vncSendString(c *vnc.ClientConn, original string) { c.KeyEvent(KeyLeftShift, true) } + // Send the key events. We add a 100ms sleep after each key event + // to deal with network latency and the OS responding to the keystroke. + // It is kind of arbitrary but it is better than nothing. 
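// (Editorial note, not part of the original commit: with a 100ms pause after
// both the key-down and the key-up event, each typed character costs roughly
// 200ms, so a 50-character boot_command takes on the order of 10 seconds to
// type -- a deliberate trade of typing speed for reliability.)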
c.KeyEvent(keyCode, true) - time.Sleep(time.Second/10) + time.Sleep(100 * time.Millisecond) c.KeyEvent(keyCode, false) - time.Sleep(time.Second/10) + time.Sleep(100 * time.Millisecond) if keyShift { c.KeyEvent(KeyLeftShift, false) From 23a48d6619fd09fd0a8311111f37f0cd3163d560 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 8 Jun 2015 21:34:20 -0700 Subject: [PATCH 239/956] go fmt --- builder/digitalocean/ssh.go | 2 +- builder/googlecompute/ssh.go | 2 +- builder/null/ssh.go | 2 +- builder/openstack/ssh.go | 2 +- builder/parallels/common/ssh.go | 2 +- builder/qemu/ssh.go | 2 +- builder/virtualbox/common/ssh.go | 2 +- builder/vmware/common/ssh.go | 2 +- builder/vmware/iso/step_upload_vmx.go | 9 ++++----- common/step_connect_ssh.go | 2 +- communicator/ssh/communicator.go | 2 +- communicator/ssh/communicator_test.go | 2 +- post-processor/atlas/util_test.go | 2 +- 13 files changed, 16 insertions(+), 17 deletions(-) diff --git a/builder/digitalocean/ssh.go b/builder/digitalocean/ssh.go index bd0afc3fe..12046b04b 100644 --- a/builder/digitalocean/ssh.go +++ b/builder/digitalocean/ssh.go @@ -1,9 +1,9 @@ package digitalocean import ( - "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" ) func sshAddress(state multistep.StateBag) (string, error) { diff --git a/builder/googlecompute/ssh.go b/builder/googlecompute/ssh.go index a4e0151f4..e04029e44 100644 --- a/builder/googlecompute/ssh.go +++ b/builder/googlecompute/ssh.go @@ -1,9 +1,9 @@ package googlecompute import ( - "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" ) // sshAddress returns the ssh address. diff --git a/builder/null/ssh.go b/builder/null/ssh.go index a9c2af330..e6ac9ab16 100644 --- a/builder/null/ssh.go +++ b/builder/null/ssh.go @@ -1,10 +1,10 @@ package null import ( - gossh "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/communicator/ssh" + gossh "golang.org/x/crypto/ssh" "io/ioutil" ) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 16afda64d..d20f24170 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -1,10 +1,10 @@ package openstack import ( - "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/multistep" + "golang.org/x/crypto/ssh" "time" "github.com/mitchellh/gophercloud-fork-40444fb" diff --git a/builder/parallels/common/ssh.go b/builder/parallels/common/ssh.go index 142b6c99d..becf68e42 100644 --- a/builder/parallels/common/ssh.go +++ b/builder/parallels/common/ssh.go @@ -3,10 +3,10 @@ package common import ( "fmt" - "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" packerssh "github.com/mitchellh/packer/communicator/ssh" + "golang.org/x/crypto/ssh" ) func SSHAddress(state multistep.StateBag) (string, error) { diff --git a/builder/qemu/ssh.go b/builder/qemu/ssh.go index deb7ba405..9724d7483 100644 --- a/builder/qemu/ssh.go +++ b/builder/qemu/ssh.go @@ -3,10 +3,10 @@ package qemu import ( "fmt" - gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" + gossh "golang.org/x/crypto/ssh" ) func sshAddress(state multistep.StateBag) (string, error) { diff --git a/builder/virtualbox/common/ssh.go b/builder/virtualbox/common/ssh.go index c07c2ce9c..9ca2529b8 100644 --- a/builder/virtualbox/common/ssh.go +++ b/builder/virtualbox/common/ssh.go @@ -3,10 +3,10 @@ package 
common import ( "fmt" - gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" + gossh "golang.org/x/crypto/ssh" ) func SSHAddress(state multistep.StateBag) (string, error) { diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index bfd0b8bc7..167bd6792 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -7,10 +7,10 @@ import ( "log" "os" - gossh "golang.org/x/crypto/ssh" "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" + gossh "golang.org/x/crypto/ssh" ) func SSHAddressFunc(config *SSHConfig) func(multistep.StateBag) (string, error) { diff --git a/builder/vmware/iso/step_upload_vmx.go b/builder/vmware/iso/step_upload_vmx.go index 39e14af16..96dde2cfc 100644 --- a/builder/vmware/iso/step_upload_vmx.go +++ b/builder/vmware/iso/step_upload_vmx.go @@ -3,12 +3,11 @@ package iso import ( "fmt" "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" vmwcommon "github.com/mitchellh/packer/builder/vmware/common" + "github.com/mitchellh/packer/packer" "path/filepath" ) - // This step upload the VMX to the remote host // // Uses: @@ -18,8 +17,8 @@ import ( // // Produces: // -type StepUploadVMX struct{ - RemoteType string +type StepUploadVMX struct { + RemoteType string } func (c *StepUploadVMX) Run(state multistep.StateBag) multistep.StepAction { @@ -31,7 +30,7 @@ func (c *StepUploadVMX) Run(state multistep.StateBag) multistep.StepAction { if c.RemoteType == "esx5" { remoteDriver, ok := driver.(RemoteDriver) if ok { - remoteVmxPath := filepath.ToSlash(filepath.Join(fmt.Sprintf("%s",remoteDriver), filepath.Base(vmxPath))) + remoteVmxPath := filepath.ToSlash(filepath.Join(fmt.Sprintf("%s", remoteDriver), filepath.Base(vmxPath))) if err := remoteDriver.upload(remoteVmxPath, vmxPath); err != nil { state.Put("error", fmt.Errorf("Error writing VMX: %s", err)) return multistep.ActionHalt diff --git a/common/step_connect_ssh.go b/common/step_connect_ssh.go index 30064c7f2..c3d8aac2d 100644 --- a/common/step_connect_ssh.go +++ b/common/step_connect_ssh.go @@ -1,12 +1,12 @@ package common import ( - gossh "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/communicator/ssh" "github.com/mitchellh/packer/packer" + gossh "golang.org/x/crypto/ssh" "log" "strings" "time" diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 611622750..3f0e6191b 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -3,10 +3,10 @@ package ssh import ( "bufio" "bytes" - "golang.org/x/crypto/ssh" "errors" "fmt" "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" "io" "io/ioutil" "log" diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go index 26cf76757..e9f73d2dc 100644 --- a/communicator/ssh/communicator_test.go +++ b/communicator/ssh/communicator_test.go @@ -4,9 +4,9 @@ package ssh import ( "bytes" - "golang.org/x/crypto/ssh" "fmt" "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" "net" "testing" ) diff --git a/post-processor/atlas/util_test.go b/post-processor/atlas/util_test.go index 9f2535ccb..b6b9da3d9 100644 --- a/post-processor/atlas/util_test.go +++ b/post-processor/atlas/util_test.go @@ -6,7 +6,7 @@ import ( func TestLongestCommonPrefix(t *testing.T) { cases := []struct { - Input []string + 
Input []string Output string }{ { From 8c321138f4ecf78c22a19338656d00028740850a Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 9 Jun 2015 10:41:39 -0500 Subject: [PATCH 240/956] Revert "Changing --region to --location" This reverts commit f40fd36c3150440de0ad939e2e8c46782d709a59. According to the documentation below, `--region` is the current flag. If you're using an older version of the tools that use `--location`, you can customize the commands in your Packer config with `bundle_vol_command` and `bundle_upload_command` - http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/CLTRG-ami-upload-bundle.html - https://www.packer.io/docs/builders/amazon-instance.html --- builder/amazon/instance/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 8534cf8dc..d164f91ee 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -80,7 +80,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { "-s {{.SecretKey}} " + "-d {{.BundleDirectory}} " + "--batch " + - "--location {{.Region}} " + + "--region {{.Region}} " + "--retry" } From 999b0874cce88279a3d39dce441a342e7c5f5017 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 9 Jun 2015 11:38:53 -0500 Subject: [PATCH 241/956] Update AWS EBS builder to fix invalid params --- builder/amazon/common/block_device.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 14add3276..c44d03561 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -29,13 +29,23 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { for _, blockDevice := range b { ebsBlockDevice := &ec2.EBSBlockDevice{ - SnapshotID: &blockDevice.SnapshotId, - Encrypted: &blockDevice.Encrypted, - IOPS: &blockDevice.IOPS, VolumeType: &blockDevice.VolumeType, VolumeSize: &blockDevice.VolumeSize, DeleteOnTermination: &blockDevice.DeleteOnTermination, } + + // IOPS is only valid for SSD Volumes + if blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { + ebsBlockDevice.IOPS = &blockDevice.IOPS + } + + // You cannot specify Encrypted if you specify a Snapshot ID + if blockDevice.SnapshotId != "" { + ebsBlockDevice.SnapshotID = &blockDevice.SnapshotId + } else { + ebsBlockDevice.Encrypted = &blockDevice.Encrypted + } + mapping := &ec2.BlockDeviceMapping{ EBS: ebsBlockDevice, DeviceName: &blockDevice.DeviceName, From 4da118c64f22b950cb596c5c370751be5f59121b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 9 Jun 2015 11:56:40 -0500 Subject: [PATCH 242/956] fix up tests --- builder/amazon/common/block_device_test.go | 27 +++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index c4f644f67..aacd54cb4 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" ) @@ -28,11 +29,31 @@ func TestBlockDevice(t *testing.T) { DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String("ephemeral0"), EBS: &ec2.EBSBlockDevice{ - Encrypted: aws.Boolean(false), SnapshotID: aws.String("snap-1234"), VolumeType: aws.String("standard"), VolumeSize: 
aws.Long(8), DeleteOnTermination: aws.Boolean(true), + }, + }, + }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + VirtualName: "ephemeral0", + VolumeType: "io1", + VolumeSize: 8, + DeleteOnTermination: true, + IOPS: 1000, + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + VirtualName: aws.String("ephemeral0"), + EBS: &ec2.EBSBlockDevice{ + Encrypted: aws.Boolean(false), + VolumeType: aws.String("io1"), + VolumeSize: aws.Long(8), + DeleteOnTermination: aws.Boolean(true), IOPS: aws.Long(1000), }, }, @@ -48,11 +69,11 @@ func TestBlockDevice(t *testing.T) { expected := []*ec2.BlockDeviceMapping{tc.Result} got := blockDevices.BuildAMIDevices() if !reflect.DeepEqual(expected, got) { - t.Fatalf("bad: %#v", expected) + t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", awsutil.StringValue(expected), awsutil.StringValue(got)) } if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) { - t.Fatalf("bad: %#v", expected) + t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", awsutil.StringValue(expected), awsutil.StringValue(blockDevices.BuildLaunchDevices())) } } } From 91cfe5edf45404dc44b7decb0854baa3f2cc7236 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 9 Jun 2015 20:57:32 -0700 Subject: [PATCH 243/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 027fc6fed..26f0274a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,8 @@ BUG FIXES: OS installers. [GH-1709] * builder/virtualbox: Remove the floppy controller in addition to the floppy disk. [GH-1879] + * builder/virtualbox: Fixed regression where downloading ISO without a + ".iso" extension didn't work. [GH-1839] * builder/vmware: Add 100ms delay between keystrokes to avoid subtle timing issues in most cases. 
[GH-1663] * builder/vmware: Bind HTTP server to IPv4, which is more compatible with From 6e9fb6a9d1c019da4d9b442b73b46312c7c91d87 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 9 Jun 2015 21:09:09 -0700 Subject: [PATCH 244/956] command/push: the -name parameter actually works --- command/push.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/push.go b/command/push.go index 6c04917dd..b4007db80 100644 --- a/command/push.go +++ b/command/push.go @@ -157,7 +157,7 @@ func (c *PushCommand) Run(args []string) int { // Build the upload options var uploadOpts uploadOpts - uploadOpts.Slug = push.Name + uploadOpts.Slug = name uploadOpts.Builds = make(map[string]*uploadBuildInfo) for _, b := range tpl.Builders { info := &uploadBuildInfo{Type: b.Type} From 2a6c4e0d2c2ee20b3715691ca0ed1934c71e3bec Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 9 Jun 2015 21:09:56 -0700 Subject: [PATCH 245/956] command/push: output fix --- command/push.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/push.go b/command/push.go index b4007db80..f888186de 100644 --- a/command/push.go +++ b/command/push.go @@ -236,7 +236,7 @@ func (c *PushCommand) Run(args []string) int { return 1 } - c.Ui.Say(fmt.Sprintf("Push successful to '%s'", push.Name)) + c.Ui.Say(fmt.Sprintf("Push successful to '%s'", name)) return 0 } From 8393b85ed579dd2b6a04325c8e5e7c14ed1610d6 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 10 Jun 2015 07:41:12 +0200 Subject: [PATCH 246/956] Support Parallels Desktop 11 --- builder/parallels/common/driver.go | 6 ++++++ builder/parallels/common/driver_10.go | 1 + 2 files changed, 7 insertions(+) diff --git a/builder/parallels/common/driver.go b/builder/parallels/common/driver.go index bdb73719c..5c3d93c09 100644 --- a/builder/parallels/common/driver.go +++ b/builder/parallels/common/driver.go @@ -73,6 +73,12 @@ func NewDriver() (Driver, error) { log.Printf("prlctl path: %s", prlctlPath) drivers = map[string]Driver{ + "11": &Parallels10Driver{ + Parallels9Driver: Parallels9Driver{ + PrlctlPath: prlctlPath, + dhcp_lease_file: dhcp_lease_file, + }, + }, "10": &Parallels10Driver{ Parallels9Driver: Parallels9Driver{ PrlctlPath: prlctlPath, diff --git a/builder/parallels/common/driver_10.go b/builder/parallels/common/driver_10.go index ae39240f4..9ab0754de 100644 --- a/builder/parallels/common/driver_10.go +++ b/builder/parallels/common/driver_10.go @@ -1,6 +1,7 @@ package common // Parallels10Driver are inherited from Parallels9Driver. 
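// (Editorial note, not part of the original commit: Parallels10Driver is an
// empty struct that just embeds Parallels9Driver, so Desktop 11 support is
// purely a matter of registering the "11" key in the drivers version map in
// driver.go above; no driver behavior changes.)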
+// Used for Parallels v 10 & 11 type Parallels10Driver struct { Parallels9Driver } From 5d600c70f2356c7b5d8a06eb363e9c05ea824a77 Mon Sep 17 00:00:00 2001 From: Jan Schumann Date: Wed, 10 Jun 2015 17:20:41 +0200 Subject: [PATCH 247/956] added test to illustrate wrong behavior --- builder/amazon/common/block_device_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index aacd54cb4..12d1530bf 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -36,6 +36,23 @@ func TestBlockDevice(t *testing.T) { }, }, }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + VolumeSize: 8, + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + VirtualName: aws.String(""), + EBS: &ec2.EBSBlockDevice{ + Encrypted: aws.Boolean(false), + VolumeType: aws.String(""), + VolumeSize: aws.Long(8), + DeleteOnTermination: aws.Boolean(false), + }, + }, + }, { Config: &BlockDevice{ DeviceName: "/dev/sdb", From 802cfa38715bd12851f2e8719c5165338c78f6ed Mon Sep 17 00:00:00 2001 From: Jan Schumann Date: Wed, 10 Jun 2015 17:21:25 +0200 Subject: [PATCH 248/956] make sure IOPS is not set for empty VolumeType --- builder/amazon/common/block_device.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index c44d03561..e97cd4107 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -35,7 +35,7 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { } // IOPS is only valid for SSD Volumes - if blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { + if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { ebsBlockDevice.IOPS = &blockDevice.IOPS } From 7410289cbad45e6b8f15aee27aef0ab8443d9869 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 10:21:32 -0700 Subject: [PATCH 249/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26f0274a2..39f222fae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. 
[GH-1858] + * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear * buidler/openstakc: Add `ssh_interface` option for rackconnect for users that From be4a82dfae69f990092db29a24749a43cf3c3dfb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 10:33:01 -0700 Subject: [PATCH 250/956] amazon/*: fix some merge conflicts --- builder/amazon/common/state.go | 2 +- builder/amazon/common/step_run_source_instance.go | 2 +- builder/amazon/ebs/step_stop_instance.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 9acedd020..075ce8ef7 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -91,7 +91,7 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc return nil, "", nil } - i = resp.Reservations[0].Instances[0] + i := resp.Reservations[0].Instances[0] return i, *i.State.Name, nil } } diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 87ac077b9..92dafa564 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -310,7 +310,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { } stateChange := StateChangeConf{ Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, - Refresh: InstanceStateRefreshFunc(ec2conn, s.instance.InstanceId), + Refresh: InstanceStateRefreshFunc(ec2conn, *s.instance.InstanceID), Target: "terminated", } diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go index 7312fd200..967e5bbf4 100644 --- a/builder/amazon/ebs/step_stop_instance.go +++ b/builder/amazon/ebs/step_stop_instance.go @@ -40,7 +40,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { stateChange := awscommon.StateChangeConf{ Pending: []string{"running", "stopping"}, Target: "stopped", - Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, instance.InstanceId), + Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, *instance.InstanceID), StepState: state, } _, err = awscommon.WaitForState(&stateChange) From 77c8df1e7b3b8910e8fb60d6f8ad37d57d4e417f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 10:33:30 -0700 Subject: [PATCH 251/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39f222fae..7542c7b97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ BUG FIXES: is deleted on cleanup. [GH-1801] * builder/amazon: AMI copy won't copy to the source region [GH-2123] * builder/amazon: Validate AMI doesn't exist with name prior to build [GH-1774] + * builder/amazon: Improved retry logic around waiting for instances. [GH-1764] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. 
[GH-1930] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API From bd04b52b323472922d0fa1dbec9a870296179836 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 10:50:08 -0700 Subject: [PATCH 252/956] virtualbox/common: style --- builder/virtualbox/common/step_export.go | 9 +++------ builder/virtualbox/common/step_forward_ssh.go | 7 ++----- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/builder/virtualbox/common/step_export.go b/builder/virtualbox/common/step_export.go index 0a3cd816c..56b013d9f 100644 --- a/builder/virtualbox/common/step_export.go +++ b/builder/virtualbox/common/step_export.go @@ -31,12 +31,10 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { // Wait a second to ensure VM is really shutdown. log.Println("1 second timeout to ensure VM is really shutdown") time.Sleep(1 * time.Second) + ui.Say("Preparing to export machine...") // Clear out the Packer-created forwarding rule - ui.Say("Preparing to export machine...") - var command []string - - if s.SkipNatMapping == false { + if !s.SkipNatMapping { ui.Message(fmt.Sprintf( "Deleting forwarded port mapping for SSH (host port %d)", state.Get("sshHostPort"))) @@ -52,13 +50,12 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { // Export the VM to an OVF outputPath := filepath.Join(s.OutputDir, vmName+"."+s.Format) - command = []string{ + command := []string{ "export", vmName, "--output", outputPath, } - command = append(command, s.ExportOpts...) ui.Say("Exporting virtual machine...") diff --git a/builder/virtualbox/common/step_forward_ssh.go b/builder/virtualbox/common/step_forward_ssh.go index d6d604e00..fe6004281 100644 --- a/builder/virtualbox/common/step_forward_ssh.go +++ b/builder/virtualbox/common/step_forward_ssh.go @@ -30,11 +30,8 @@ func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) - var sshHostPort uint - if s.SkipNatMapping { - sshHostPort = s.GuestPort - log.Printf("Skipping SSH NAT mapping and using SSH port %d", sshHostPort) - } else { + sshHostPort := s.GuestPort + if !s.SkipNatMapping { log.Printf("Looking for available SSH port between %d and %d", s.HostPortMin, s.HostPortMax) var offset uint = 0 From 9b83b7f2e05fd3a83ee65b53eb05aab1f4786527 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 10:50:40 -0700 Subject: [PATCH 253/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7542c7b97..a98e0bedc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,8 @@ IMPROVEMENTS: RackConnect data to appear * buidler/openstakc: Add `ssh_interface` option for rackconnect for users that have prohibitive firewalls + * builder/virtualbox: Added option: `ssh_skip_nat_mapping` to skip the + automatic port forward for SSH and to use the guest port directly.
[GH-1078] * builder/virtualbox: Added SCSI support * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] From 626243baed02592f76088c3e5ae7140e73d82b5b Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 10 Jun 2015 13:18:17 -0500 Subject: [PATCH 254/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a98e0bedc..a6f179461 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] + * builder/amazon-ebs: Add pre-build step, validate unique AMI name [GH-2187] * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear From 0ff508c80d57447c740830a968eaff32cf42257b Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 10 Jun 2015 13:20:39 -0500 Subject: [PATCH 255/956] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6f179461..b0abd0cb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,6 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] - * builder/amazon-ebs: Add pre-build step, validate unique AMI name [GH-2187] * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear @@ -38,6 +37,7 @@ BUG FIXES: * builder/amazon: Improved retry logic around waiting for instances. [GH-1764] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] + * builder/amazon/instance: Use `--region` flag for bundle upload command. [GH-1931] * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/digitalocean: Private images can be used as a source [GH-1792] * builder/docker: Fixed hang on prompt while copying script From 83fc70fa00d84cee46399bef2ff8cdaeddfa49bf Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 10 Jun 2015 13:22:36 -0500 Subject: [PATCH 256/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0abd0cb9..8c9a2c151 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ BUG FIXES: * builder/amazon: AMI copy won't copy to the source region [GH-2123] * builder/amazon: Validate AMI doesn't exist with name prior to build [GH-1774] * builder/amazon: Improved retry logic around waiting for instances. [GH-1764] + * builder/amazon: Fix issues with creating Block Devices. [GH-2195] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/amazon/instance: Use `--region` flag for bundle upload command. 
[GH-1931] From fafdfc962f7272afe390961e804de4fe67879f85 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 11:31:20 -0700 Subject: [PATCH 257/956] vmware/common: detect Vmware 'unknown error' and show better message --- builder/vmware/common/driver.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index c645f40b7..ee8dbc30e 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "os/exec" + "regexp" "runtime" "strconv" "strings" @@ -135,6 +136,18 @@ func runAndLog(cmd *exec.Cmd) (string, string, error) { } err = fmt.Errorf("VMware error: %s", message) + + // If "unknown error" is in there, add some additional notes + re := regexp.MustCompile(`(?i)unknown error`) + if re.MatchString(message) { + err = fmt.Errorf( + "%s\n\n%s", err, + "Packer detected a VMware 'Unknown Error'. Unfortunately VMware\n"+ + "often has extremely vague error messages such as this and Packer\n"+ + "itself can't do much about that. Please check the vmware.log files\n"+ + "created by VMware when a VM is started (in the directory of the\n"+ + "vmx file), which often contains more detailed error information.") + } } log.Printf("stdout: %s", stdoutString) From 802a76685ee14cbd1ece648f8721d7564287c831 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 11:34:37 -0700 Subject: [PATCH 258/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c9a2c151..86e1cbdb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ BUG FIXES: OS installers. [GH-1709] * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] + * builder/vmware: Nested output directories for ESXi work [GH-2174] * command/validate: don't crash for invalid builds [GH-2139] * post-processor/atlas: Find common archive prefix for Windows [GH-1874] * post-processor/atlas: Fix index out of range panic [GH-1959] From 52269b66b98e119bb4600a470007997c6476dafc Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 10 Jun 2015 12:30:18 -0700 Subject: [PATCH 259/956] Added new compress post-processor, contributed by Vasiliy Tolstov --- post-processor/compress/LICENSE | 21 ++ post-processor/compress/artifact.go | 32 +- post-processor/compress/benchmark.go | 197 ++++++++++ post-processor/compress/post-processor.go | 419 +++++++++++++++++++--- 4 files changed, 600 insertions(+), 69 deletions(-) create mode 100644 post-processor/compress/LICENSE create mode 100644 post-processor/compress/benchmark.go diff --git a/post-processor/compress/LICENSE b/post-processor/compress/LICENSE new file mode 100644 index 000000000..38bbf26f3 --- /dev/null +++ b/post-processor/compress/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Vasiliy Tolstov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index 34a7ce8d6..f428a3b55 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -5,40 +5,34 @@ import ( "os" ) -const BuilderId = "packer.post-processor.compress" +const BuilderId = "vtolstov.compress" type Artifact struct { - Path string - Provider string + builderId string + dir string + f []string } -func NewArtifact(provider, path string) *Artifact { - return &Artifact{ - Path: path, - Provider: provider, - } -} - -func (*Artifact) BuilderId() string { +func (a *Artifact) BuilderId() string { return BuilderId } -func (self *Artifact) Id() string { - return "" +func (a *Artifact) Files() []string { + return a.f } -func (self *Artifact) Files() []string { - return []string{self.Path} +func (*Artifact) Id() string { + return "COMPRESS" } -func (self *Artifact) String() string { - return fmt.Sprintf("'%s' compressing: %s", self.Provider, self.Path) +func (a *Artifact) String() string { + return fmt.Sprintf("VM compressed files in directory: %s", a.dir) } func (*Artifact) State(name string) interface{} { return nil } -func (self *Artifact) Destroy() error { - return os.Remove(self.Path) +func (a *Artifact) Destroy() error { + return os.RemoveAll(a.dir) } diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go new file mode 100644 index 000000000..a2585bc89 --- /dev/null +++ b/post-processor/compress/benchmark.go @@ -0,0 +1,197 @@ +// +build ignore + +package main + +import ( + "compress/flate" + gzip "compress/gzip" + "io" + "io/ioutil" + "fmt" + "os" + "runtime" + "testing" + + bgzf "github.com/biogo/hts/bgzf" + pgzip "github.com/klauspost/pgzip" + lz4 "github.com/pierrec/lz4" +) + +type Compressor struct { + r *os.File + w *os.File + sr int64 + sw int64 +} + +func (c *Compressor) Close() error { + var err error + + fi, _ := c.w.Stat() + c.sw = fi.Size() + if err = c.w.Close(); err != nil { + return err + } + + fi, _ = c.r.Stat() + c.sr = fi.Size() + if err = c.r.Close(); err != nil { + return err + } + + return nil +} + +func NewCompressor(src, dst string) (*Compressor, error) { + r, err := os.Open(src) + if err != nil { + return nil, err + } + + w, err := os.Create(dst) + if err != nil { + r.Close() + return nil, err + } + + c := &Compressor{r: r, w: w} + return c, nil +} + +func main() { + + runtime.GOMAXPROCS(runtime.NumCPU()) + + var resw testing.BenchmarkResult + var resr testing.BenchmarkResult + + c, err := NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkGZIPWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkGZIPReader) + c.Close() + fmt.Printf("gzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkBGZFWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkBGZFReader) + c.Close() + fmt.Printf("bgzf:\twriter %s\treader 
%s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkPGZIPWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkPGZIPReader) + c.Close() + fmt.Printf("pgzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkLZ4Writer) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkLZ4Reader) + c.Close() + fmt.Printf("lz4:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + +} + +func (c *Compressor) BenchmarkGZIPWriter(b *testing.B) { + cw, _ := gzip.NewWriterLevel(c.w, flate.BestSpeed) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c *Compressor) BenchmarkGZIPReader(b *testing.B) { + cr, _ := gzip.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkBGZFWriter(b *testing.B) { + cw, _ := bgzf.NewWriterLevel(c.w, flate.BestSpeed, runtime.NumCPU()) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + c.w.Sync() +} + +func (c *Compressor) BenchmarkBGZFReader(b *testing.B) { + cr, _ := bgzf.NewReader(c.w, 0) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) { + cw, _ := pgzip.NewWriterLevel(c.w, flate.BestSpeed) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c *Compressor) BenchmarkPGZIPReader(b *testing.B) { + cr, _ := pgzip.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { + cw := lz4.NewWriter(c.w) +// cw.Header.HighCompression = true + cw.Header.NoChecksum = true + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { + cr := lz4.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index ccf300946..f62bea858 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -1,98 +1,417 @@ package compress import ( - "archive/tar" - "compress/gzip" + tar "archive/tar" + zip "archive/zip" + "compress/flate" + gzip "compress/gzip" "fmt" "io" "os" + "path/filepath" + "runtime" + "strings" + "time" + bgzf "github.com/biogo/hts/bgzf" + pgzip "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/template/interpolate" + lz4 "github.com/pierrec/lz4" + "gopkg.in/yaml.v2" ) +type Metadata map[string]Metaitem + +type Metaitem struct { + CompSize int64 `yaml:"compsize"` + OrigSize int64 `yaml:"origsize"` + CompType string `yaml:"comptype"` + CompDate string `yaml:"compdate"` +} + type Config struct { common.PackerConfig `mapstructure:",squash"` - OutputPath string `mapstructure:"output"` - - ctx interpolate.Context + OutputPath string 
`mapstructure:"output"` + OutputFile string `mapstructure:"file"` + Compression int `mapstructure:"compression"` + Metadata bool `mapstructure:"metadata"` + NumCPU int `mapstructure:"numcpu"` + Format string `mapstructure:"format"` + KeepInputArtifact bool `mapstructure:"keep_input_artifact"` + tpl *packer.ConfigTemplate } -type PostProcessor struct { - config Config +type CompressPostProcessor struct { + cfg Config } -func (self *PostProcessor) Configure(raws ...interface{}) error { - err := config.Decode(&self.config, &config.DecodeOpts{ - Interpolate: true, - InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{}, - }, - }, raws...) +func (p *CompressPostProcessor) Configure(raws ...interface{}) error { + p.cfg.Compression = -1 + _, err := common.DecodeConfig(&p.cfg, raws...) if err != nil { return err } + errs := new(packer.MultiError) + + p.cfg.tpl, err = packer.NewConfigTemplate() + if err != nil { + return err + } + p.cfg.tpl.UserVars = p.cfg.PackerUserVars + + if p.cfg.OutputPath == "" { + p.cfg.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" + } + + if err = p.cfg.tpl.Validate(p.cfg.OutputPath); err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error parsing target template: %s", err)) + } + + templates := map[string]*string{ + "output": &p.cfg.OutputPath, + } + + if p.cfg.Compression > flate.BestCompression { + p.cfg.Compression = flate.BestCompression + } + if p.cfg.Compression == -1 { + p.cfg.Compression = flate.DefaultCompression + } + + if p.cfg.NumCPU < 1 { + p.cfg.NumCPU = runtime.NumCPU() + } + + runtime.GOMAXPROCS(p.cfg.NumCPU) + + for key, ptr := range templates { + if *ptr == "" { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("%s must be set", key)) + } + + *ptr, err = p.cfg.tpl.Process(*ptr, nil) + if err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error processing %s: %s", key, err)) + } + } + + if len(errs.Errors) > 0 { + return errs + } + return nil } -func (self *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - ui.Say(fmt.Sprintf("Creating archive for '%s'", artifact.BuilderId())) +func (p *CompressPostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { + // layout shows by example how the reference time should be represented. + const layout = "2006-01-02_15-04-05" + t := time.Now() - // Create the compressed archive file at the appropriate OutputPath. 
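For contrast with the old single tar+gzip path being removed here, the new PostProcess above treats `format` as a dot-separated chain of stages, feeding each stage's output files into the next. The stage names (tar, zip, gzip, pgzip, bgzf, lz4, e2fs) come from the patch; the file names in this standalone sketch are hypothetical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "tar.pgzip" means: tar all artifact files into one archive,
	// then compress that archive with parallel gzip.
	format := "tar.pgzip"
	files := []string{"disk.vmdk", "box.ovf"} // hypothetical artifact files
	for _, stage := range strings.Split(format, ".") {
		fmt.Printf("stage %q consumes %v\n", stage, files)
		// Each cmp* helper returns the file list the next stage consumes;
		// after "tar" that list is the single tarball.
		if stage == "tar" {
			files = []string{"archive.tar"}
		}
	}
}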
- fw, err := os.Create(self.config.OutputPath) + if !p.cfg.Metadata { + return metadata + } + for _, f := range files { + if fi, err := os.Stat(f); err != nil { + continue + } else { + if i, ok := metadata[filepath.Base(f)]; !ok { + metadata[filepath.Base(f)] = Metaitem{CompType: p.cfg.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} + } else { + i.CompSize = fi.Size() + i.CompDate = t.Format(layout) + metadata[filepath.Base(f)] = i + } + } + } + return metadata +} + +func (p *CompressPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + newartifact := &Artifact{builderId: artifact.BuilderId(), dir: p.cfg.OutputPath} + var metafile string = filepath.Join(p.cfg.OutputPath, "metadata") + + _, err := os.Stat(newartifact.dir) + if err == nil { + return nil, false, fmt.Errorf("output dir must not exists: %s", err) + } + err = os.MkdirAll(newartifact.dir, 0755) if err != nil { - return nil, false, fmt.Errorf( - "Failed creating file for compressed archive: %s", self.config.OutputPath) + return nil, false, fmt.Errorf("failed to create output: %s", err) + } + + formats := strings.Split(p.cfg.Format, ".") + files := artifact.Files() + + metadata := make(Metadata, 0) + metadata = p.fillMetadata(metadata, files) + + for _, compress := range formats { + switch compress { + case "tar": + files, err = p.cmpTAR(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + metadata = p.fillMetadata(metadata, files) + case "zip": + files, err = p.cmpZIP(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + metadata = p.fillMetadata(metadata, files) + case "pgzip": + files, err = p.cmpPGZIP(files, p.cfg.OutputPath) + metadata = p.fillMetadata(metadata, files) + case "gzip": + files, err = p.cmpGZIP(files, p.cfg.OutputPath) + metadata = p.fillMetadata(metadata, files) + case "bgzf": + files, err = p.cmpBGZF(files, p.cfg.OutputPath) + metadata = p.fillMetadata(metadata, files) + case "lz4": + files, err = p.cmpLZ4(files, p.cfg.OutputPath) + metadata = p.fillMetadata(metadata, files) + case "e2fs": + files, err = p.cmpE2FS(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + metadata = p.fillMetadata(metadata, files) + } + if err != nil { + return nil, false, fmt.Errorf("Failed to compress: %s", err) + } + } + + if p.cfg.Metadata { + fp, err := os.Create(metafile) + if err != nil { + return nil, false, err + } + if buf, err := yaml.Marshal(metadata); err != nil { + fp.Close() + return nil, false, err + } else { + if _, err = fp.Write(buf); err != nil { + fp.Close() + return nil, false, err + } + fp.Close() + } + } + + newartifact.f = append(newartifact.f, files...) + if p.cfg.Metadata { + newartifact.f = append(newartifact.f, metafile) + } + + return newartifact, p.cfg.KeepInputArtifact, nil +} + +func (p *CompressPostProcessor) cmpTAR(src []string, dst string) ([]string, error) { + fw, err := os.Create(dst) + if err != nil { + return nil, fmt.Errorf("tar error: %s", err) } defer fw.Close() - gw := gzip.NewWriter(fw) - defer gw.Close() + tw := tar.NewWriter(fw) + defer tw.Close() - // Iterate through all of the artifact's files and put them into the - // compressed archive using the tar/gzip writers. 
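The same archive/tar mechanics carry over into the new cmpTAR further down: stat the file, build a header with tar.FileInfoHeader, write the header, then stream the file contents into the writer. A compact, self-contained version for one hypothetical input file:

package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
)

func main() {
	fw, err := os.Create("/tmp/example.tar") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	defer fw.Close()

	tw := tar.NewWriter(fw)
	defer tw.Close()

	name := "/tmp/input.txt" // hypothetical source file
	fi, err := os.Stat(name)
	if err != nil {
		log.Fatal(err)
	}
	// FileInfoHeader fills in size, mode and timestamps from the FileInfo.
	header, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	if err := tw.WriteHeader(header); err != nil {
		log.Fatal(err)
	}
	fr, err := os.Open(name)
	if err != nil {
		log.Fatal(err)
	}
	defer fr.Close()
	if _, err := io.Copy(tw, fr); err != nil {
		log.Fatal(err)
	}
}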
- for _, path := range artifact.Files() { - fi, err := os.Stat(path) + for _, name := range src { + fi, err := os.Stat(name) if err != nil { - return nil, false, fmt.Errorf( - "Failed stating file: %s", path) + return nil, fmt.Errorf("tar error: %s", err) } - target, _ := os.Readlink(path) + target, _ := os.Readlink(name) header, err := tar.FileInfoHeader(fi, target) if err != nil { - return nil, false, fmt.Errorf( - "Failed creating archive header: %s", path) + return nil, fmt.Errorf("tar erorr: %s", err) } - tw := tar.NewWriter(gw) - defer tw.Close() - - // Write the header first to the archive. This takes partial data - // from the FileInfo that is grabbed by running the stat command. - if err := tw.WriteHeader(header); err != nil { - return nil, false, fmt.Errorf( - "Failed writing archive header: %s", path) + if err = tw.WriteHeader(header); err != nil { + return nil, fmt.Errorf("tar error: %s", err) } - // Open the target file for archiving and compressing. - fr, err := os.Open(path) + fr, err := os.Open(name) if err != nil { - return nil, false, fmt.Errorf( - "Failed opening file '%s' to write compressed archive.", path) + return nil, fmt.Errorf("tar error: %s", err) } - defer fr.Close() if _, err = io.Copy(tw, fr); err != nil { - return nil, false, fmt.Errorf( - "Failed copying file to archive: %s", path) + fr.Close() + return nil, fmt.Errorf("tar error: %s", err) } + fr.Close() } - - return NewArtifact(artifact.BuilderId(), self.config.OutputPath), false, nil + return []string{dst}, nil +} + +func (p *CompressPostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { + var res []string + for _, name := range src { + filename := filepath.Join(dst, filepath.Base(name)) + fw, err := os.Create(filename) + if err != nil { + return nil, fmt.Errorf("gzip error: %s", err) + } + cw, err := gzip.NewWriterLevel(fw, p.cfg.Compression) + if err != nil { + fw.Close() + return nil, fmt.Errorf("gzip error: %s", err) + } + fr, err := os.Open(name) + if err != nil { + cw.Close() + fw.Close() + return nil, fmt.Errorf("gzip error: %s", err) + } + if _, err = io.Copy(cw, fr); err != nil { + cw.Close() + fr.Close() + fw.Close() + return nil, fmt.Errorf("gzip error: %s", err) + } + cw.Close() + fr.Close() + fw.Close() + res = append(res, filename) + } + return res, nil +} + +func (p *CompressPostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { + var res []string + for _, name := range src { + filename := filepath.Join(dst, filepath.Base(name)) + fw, err := os.Create(filename) + if err != nil { + return nil, fmt.Errorf("pgzip error: %s", err) + } + cw, err := pgzip.NewWriterLevel(fw, p.cfg.Compression) + if err != nil { + fw.Close() + return nil, fmt.Errorf("pgzip error: %s", err) + } + fr, err := os.Open(name) + if err != nil { + cw.Close() + fw.Close() + return nil, fmt.Errorf("pgzip error: %s", err) + } + if _, err = io.Copy(cw, fr); err != nil { + cw.Close() + fr.Close() + fw.Close() + return nil, fmt.Errorf("pgzip error: %s", err) + } + cw.Close() + fr.Close() + fw.Close() + res = append(res, filename) + } + return res, nil +} + +func (p *CompressPostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { + var res []string + for _, name := range src { + filename := filepath.Join(dst, filepath.Base(name)) + fw, err := os.Create(filename) + if err != nil { + return nil, fmt.Errorf("lz4 error: %s", err) + } + cw := lz4.NewWriter(fw) + if err != nil { + fw.Close() + return nil, fmt.Errorf("lz4 error: %s", err) + } + if p.cfg.Compression > flate.DefaultCompression { + 
cw.Header.HighCompression = true + } + fr, err := os.Open(name) + if err != nil { + cw.Close() + fw.Close() + return nil, fmt.Errorf("lz4 error: %s", err) + } + if _, err = io.Copy(cw, fr); err != nil { + cw.Close() + fr.Close() + fw.Close() + return nil, fmt.Errorf("lz4 error: %s", err) + } + cw.Close() + fr.Close() + fw.Close() + res = append(res, filename) + } + return res, nil +} + +func (p *CompressPostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { + var res []string + for _, name := range src { + filename := filepath.Join(dst, filepath.Base(name)) + fw, err := os.Create(filename) + if err != nil { + return nil, fmt.Errorf("bgzf error: %s", err) + } + + cw, err := bgzf.NewWriterLevel(fw, p.cfg.Compression, runtime.NumCPU()) + if err != nil { + return nil, fmt.Errorf("bgzf error: %s", err) + } + fr, err := os.Open(name) + if err != nil { + cw.Close() + fw.Close() + return nil, fmt.Errorf("bgzf error: %s", err) + } + if _, err = io.Copy(cw, fr); err != nil { + cw.Close() + fr.Close() + fw.Close() + return nil, fmt.Errorf("bgzf error: %s", err) + } + cw.Close() + fr.Close() + fw.Close() + res = append(res, filename) + } + return res, nil +} + +func (p *CompressPostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { + panic("not implemented") +} + +func (p *CompressPostProcessor) cmpZIP(src []string, dst string) ([]string, error) { + fw, err := os.Create(dst) + if err != nil { + return nil, fmt.Errorf("zip error: %s", err) + } + defer fw.Close() + + zw := zip.NewWriter(fw) + defer zw.Close() + + for _, name := range src { + header, err := zw.Create(name) + if err != nil { + return nil, fmt.Errorf("zip erorr: %s", err) + } + + fr, err := os.Open(name) + if err != nil { + return nil, fmt.Errorf("zip error: %s", err) + } + + if _, err = io.Copy(header, fr); err != nil { + fr.Close() + return nil, fmt.Errorf("zip error: %s", err) + } + fr.Close() + } + return []string{dst}, nil + } From ffdd5e6772f2785e4d8ee3ea97d7438a76c1892e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 12:40:20 -0700 Subject: [PATCH 260/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86e1cbdb1..3a33dbc92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ IMPROVEMENTS: * builder/virtualbox: Added option: `ssh_skip_nat_mapping` to skip the automatic port forward for SSH and to use the guest port directly. 
[GH-1078] * builder/virtualbox: Added SCSI support + * builder/vmware: Support for additional disks [GH-1382] * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] * post-processor/docker-save: Can be chained [GH-2179] From 9da9ce6046b3c40a9fe5ac09a38aea6661325d9d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 12:41:17 -0700 Subject: [PATCH 261/956] vmware/iso: disk_additional_size --- builder/vmware/iso/builder.go | 28 +++++++++---------- .../docs/builders/vmware-iso.html.markdown | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 065d20603..352d02960 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -35,20 +35,20 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` - AdditionalDiskSize []uint `mapstructure:"additionaldisk_size"` - DiskName string `mapstructure:"vmdk_name"` - DiskSize uint `mapstructure:"disk_size"` - DiskTypeId string `mapstructure:"disk_type_id"` - FloppyFiles []string `mapstructure:"floppy_files"` - GuestOSType string `mapstructure:"guest_os_type"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` - Version string `mapstructure:"version"` - VMName string `mapstructure:"vm_name"` - BootCommand []string `mapstructure:"boot_command"` - SkipCompaction bool `mapstructure:"skip_compaction"` - VMXTemplatePath string `mapstructure:"vmx_template_path"` + AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskName string `mapstructure:"vmdk_name"` + DiskSize uint `mapstructure:"disk_size"` + DiskTypeId string `mapstructure:"disk_type_id"` + FloppyFiles []string `mapstructure:"floppy_files"` + GuestOSType string `mapstructure:"guest_os_type"` + ISOChecksum string `mapstructure:"iso_checksum"` + ISOChecksumType string `mapstructure:"iso_checksum_type"` + ISOUrls []string `mapstructure:"iso_urls"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` + BootCommand []string `mapstructure:"boot_command"` + SkipCompaction bool `mapstructure:"skip_compaction"` + VMXTemplatePath string `mapstructure:"vmx_template_path"` RemoteType string `mapstructure:"remote_type"` RemoteDatastore string `mapstructure:"remote_datastore"` diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index b021ca532..177bfc608 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -72,7 +72,7 @@ each category, the available options are alphabetized and described. ### Optional: -* `additionaldisk_size` (array of integers) - The size(s) of any additional +* `disk_additional_size` (array of integers) - The size(s) of any additional hard disks for the VM in megabytes. If this is not specified then the VM will only contain a primary hard disk. 
The builder uses expandable, not fixed-size virtual hard disks, so the actual file representing the disk will not use the From c9b413e3d1214f94fae52f91666e277a6c5fa85e Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 10 Jun 2015 15:18:05 -0500 Subject: [PATCH 262/956] builder/amazon-instance: Omit access, secrety key if using IAM Instance Profile --- builder/amazon/instance/builder.go | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index d164f91ee..91355b913 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -73,15 +73,25 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.BundleUploadCommand == "" { - b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " + - "-b {{.BucketName}} " + - "-m {{.ManifestPath}} " + - "-a {{.AccessKey}} " + - "-s {{.SecretKey}} " + - "-d {{.BundleDirectory}} " + - "--batch " + - "--region {{.Region}} " + - "--retry" + if b.config.IamInstanceProfile != "" { + b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " + + "-b {{.BucketName}} " + + "-m {{.ManifestPath}} " + + "-d {{.BundleDirectory}} " + + "--batch " + + "--region {{.Region}} " + + "--retry" + } else { + b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " + + "-b {{.BucketName}} " + + "-m {{.ManifestPath}} " + + "-a {{.AccessKey}} " + + "-s {{.SecretKey}} " + + "-d {{.BundleDirectory}} " + + "--batch " + + "--region {{.Region}} " + + "--retry" + } } if b.config.BundleVolCommand == "" { From c4fc365c657038e8929f17ce49c8db2434d92a12 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 10 Jun 2015 13:33:50 -0700 Subject: [PATCH 263/956] Updated to reflect changes to template code --- plugin/post-processor-compress/main.go | 2 +- post-processor/compress/post-processor.go | 26 +++++++++++------------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugin/post-processor-compress/main.go b/plugin/post-processor-compress/main.go index 3acc85228..15bf6f223 100644 --- a/plugin/post-processor-compress/main.go +++ b/plugin/post-processor-compress/main.go @@ -10,6 +10,6 @@ func main() { if err != nil { panic(err) } - server.RegisterPostProcessor(new(compress.PostProcessor)) + server.RegisterPostProcessor(new(compress.CompressPostProcessor)) server.Serve() } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index f62bea858..9751a24a8 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -16,7 +16,9 @@ import ( bgzf "github.com/biogo/hts/bgzf" pgzip "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" lz4 "github.com/pierrec/lz4" "gopkg.in/yaml.v2" ) @@ -40,7 +42,7 @@ type Config struct { NumCPU int `mapstructure:"numcpu"` Format string `mapstructure:"format"` KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - tpl *packer.ConfigTemplate + ctx *interpolate.Context } type CompressPostProcessor struct { @@ -49,24 +51,22 @@ type CompressPostProcessor struct { func (p *CompressPostProcessor) Configure(raws ...interface{}) error { p.cfg.Compression = -1 - _, err := common.DecodeConfig(&p.cfg, raws...) 
- if err != nil { - return err - } + err := config.Decode(&p.cfg, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + // TODO figure out if something needs to go here. + }, + }, + }, raws...) errs := new(packer.MultiError) - p.cfg.tpl, err = packer.NewConfigTemplate() - if err != nil { - return err - } - p.cfg.tpl.UserVars = p.cfg.PackerUserVars - if p.cfg.OutputPath == "" { p.cfg.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" } - if err = p.cfg.tpl.Validate(p.cfg.OutputPath); err != nil { + if err = interpolate.Validate(p.cfg.OutputPath, p.cfg.ctx); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing target template: %s", err)) } @@ -94,7 +94,7 @@ func (p *CompressPostProcessor) Configure(raws ...interface{}) error { errs, fmt.Errorf("%s must be set", key)) } - *ptr, err = p.cfg.tpl.Process(*ptr, nil) + *ptr, err = interpolate.Render(p.cfg.OutputPath, p.cfg.ctx) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", key, err)) From 42d749ab5f38d01b8e89b781d7056bc949c32f91 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 10 Jun 2015 13:46:21 -0700 Subject: [PATCH 264/956] Light style and typo cleanup --- plugin/post-processor-compress/main.go | 2 +- post-processor/compress/post-processor.go | 28 +++++++++++------------ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/plugin/post-processor-compress/main.go b/plugin/post-processor-compress/main.go index 15bf6f223..3acc85228 100644 --- a/plugin/post-processor-compress/main.go +++ b/plugin/post-processor-compress/main.go @@ -10,6 +10,6 @@ func main() { if err != nil { panic(err) } - server.RegisterPostProcessor(new(compress.CompressPostProcessor)) + server.RegisterPostProcessor(new(compress.PostProcessor)) server.Serve() } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 9751a24a8..74228d909 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -45,11 +45,11 @@ type Config struct { ctx *interpolate.Context } -type CompressPostProcessor struct { +type PostProcessor struct { cfg Config } -func (p *CompressPostProcessor) Configure(raws ...interface{}) error { +func (p *PostProcessor) Configure(raws ...interface{}) error { p.cfg.Compression = -1 err := config.Decode(&p.cfg, &config.DecodeOpts{ Interpolate: true, @@ -109,7 +109,7 @@ func (p *CompressPostProcessor) Configure(raws ...interface{}) error { } -func (p *CompressPostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { +func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { // layout shows by example how the reference time should be represented. 
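	// For example: Go's reference moment is Mon Jan 2 15:04:05 MST 2006,
	// so formatting a (hypothetical) build time of June 10, 2015 13:22:36
	// with "2006-01-02_15-04-05" yields "2015-06-10_13-22-36".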
const layout = "2006-01-02_15-04-05" t := time.Now() @@ -133,9 +133,9 @@ func (p *CompressPostProcessor) fillMetadata(metadata Metadata, files []string) return metadata } -func (p *CompressPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { newartifact := &Artifact{builderId: artifact.BuilderId(), dir: p.cfg.OutputPath} - var metafile string = filepath.Join(p.cfg.OutputPath, "metadata") + metafile := filepath.Join(p.cfg.OutputPath, "metadata") _, err := os.Stat(newartifact.dir) if err == nil { @@ -206,7 +206,7 @@ func (p *CompressPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifa return newartifact, p.cfg.KeepInputArtifact, nil } -func (p *CompressPostProcessor) cmpTAR(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { fw, err := os.Create(dst) if err != nil { return nil, fmt.Errorf("tar error: %s", err) @@ -225,7 +225,7 @@ func (p *CompressPostProcessor) cmpTAR(src []string, dst string) ([]string, erro target, _ := os.Readlink(name) header, err := tar.FileInfoHeader(fi, target) if err != nil { - return nil, fmt.Errorf("tar erorr: %s", err) + return nil, fmt.Errorf("tar error: %s", err) } if err = tw.WriteHeader(header); err != nil { @@ -246,7 +246,7 @@ func (p *CompressPostProcessor) cmpTAR(src []string, dst string) ([]string, erro return []string{dst}, nil } -func (p *CompressPostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { var res []string for _, name := range src { filename := filepath.Join(dst, filepath.Base(name)) @@ -279,7 +279,7 @@ func (p *CompressPostProcessor) cmpGZIP(src []string, dst string) ([]string, err return res, nil } -func (p *CompressPostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { var res []string for _, name := range src { filename := filepath.Join(dst, filepath.Base(name)) @@ -312,7 +312,7 @@ func (p *CompressPostProcessor) cmpPGZIP(src []string, dst string) ([]string, er return res, nil } -func (p *CompressPostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { var res []string for _, name := range src { filename := filepath.Join(dst, filepath.Base(name)) @@ -348,7 +348,7 @@ func (p *CompressPostProcessor) cmpLZ4(src []string, dst string) ([]string, erro return res, nil } -func (p *CompressPostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { var res []string for _, name := range src { filename := filepath.Join(dst, filepath.Base(name)) @@ -381,11 +381,11 @@ func (p *CompressPostProcessor) cmpBGZF(src []string, dst string) ([]string, err return res, nil } -func (p *CompressPostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { panic("not implemented") } -func (p *CompressPostProcessor) cmpZIP(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpZIP(src []string, dst string) ([]string, error) { fw, err := os.Create(dst) if err != nil { return nil, fmt.Errorf("zip error: %s", err) @@ -398,7 +398,7 @@ func (p *CompressPostProcessor) 
cmpZIP(src []string, dst string) ([]string, erro for _, name := range src { header, err := zw.Create(name) if err != nil { - return nil, fmt.Errorf("zip erorr: %s", err) + return nil, fmt.Errorf("zip error: %s", err) } fr, err := os.Open(name) From d9c48e82fbc06d42d2b840eded67d344874ee53b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 14:02:06 -0700 Subject: [PATCH 265/956] builder/digitalocean: switch to new lib --- builder/digitalocean/api.go | 76 ---- builder/digitalocean/api_v1.go | 382 ---------------- builder/digitalocean/api_v2.go | 462 -------------------- builder/digitalocean/artifact.go | 9 +- builder/digitalocean/builder.go | 196 +-------- builder/digitalocean/builder_acc_test.go | 30 ++ builder/digitalocean/builder_test.go | 84 ---- builder/digitalocean/config.go | 140 ++++++ builder/digitalocean/step_create_droplet.go | 39 +- builder/digitalocean/step_create_ssh_key.go | 26 +- builder/digitalocean/step_droplet_info.go | 20 +- builder/digitalocean/step_power_off.go | 11 +- builder/digitalocean/step_shutdown.go | 9 +- builder/digitalocean/step_snapshot.go | 14 +- builder/digitalocean/token_source.go | 15 + builder/digitalocean/wait.go | 10 +- 16 files changed, 277 insertions(+), 1246 deletions(-) delete mode 100644 builder/digitalocean/api.go delete mode 100644 builder/digitalocean/api_v1.go delete mode 100644 builder/digitalocean/api_v2.go create mode 100644 builder/digitalocean/builder_acc_test.go create mode 100644 builder/digitalocean/config.go create mode 100644 builder/digitalocean/token_source.go diff --git a/builder/digitalocean/api.go b/builder/digitalocean/api.go deleted file mode 100644 index 87339ffc9..000000000 --- a/builder/digitalocean/api.go +++ /dev/null @@ -1,76 +0,0 @@ -// All of the methods used to communicate with the digital_ocean API -// are here. Their API is on a path to V2, so just plain JSON is used -// in place of a proper client library for now. 
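Everything from here down deletes the hand-rolled v1/v2 API clients in favor of the godo library. For orientation, a minimal sketch of the godo wiring this patch adds elsewhere; the token value and image id are hypothetical, and the token-source type mirrors the apiTokenSource added in token_source.go (field name assumed):

package main

import (
	"log"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

// tokenSource satisfies oauth2.TokenSource so godo can authenticate
// every request with the personal access token.
type tokenSource struct {
	AccessToken string
}

func (t *tokenSource) Token() (*oauth2.Token, error) {
	return &oauth2.Token{AccessToken: t.AccessToken}, nil
}

func main() {
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, &tokenSource{
		AccessToken: "hypothetical-token",
	}))
	// One godo call replaces the old DestroyImage helper.
	if _, err := client.Images.Delete(12345); err != nil {
		log.Fatal(err)
	}
}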
- -package digitalocean - -type Region struct { - Slug string `json:"slug"` - Name string `json:"name"` - - // v1 only - Id uint `json:"id,omitempty"` - - // v2 only - Sizes []string `json:"sizes,omitempty"` - Available bool `json:"available,omitempty"` - Features []string `json:"features,omitempty"` -} - -type RegionsResp struct { - Regions []Region -} - -type Size struct { - Slug string `json:"slug"` - - // v1 only - Id uint `json:"id,omitempty"` - Name string `json:"name,omitempty"` - - // v2 only - Memory uint `json:"memory,omitempty"` - VCPUS uint `json:"vcpus,omitempty"` - Disk uint `json:"disk,omitempty"` - Transfer float64 `json:"transfer,omitempty"` - PriceMonthly float64 `json:"price_monthly,omitempty"` - PriceHourly float64 `json:"price_hourly,omitempty"` -} - -type SizesResp struct { - Sizes []Size -} - -type Image struct { - Id uint `json:"id"` - Name string `json:"name"` - Slug string `json:"slug"` - Distribution string `json:"distribution"` - - // v2 only - Public bool `json:"public,omitempty"` - ActionIds []string `json:"action_ids,omitempty"` - CreatedAt string `json:"created_at,omitempty"` -} - -type ImagesResp struct { - Images []Image -} - -type DigitalOceanClient interface { - CreateKey(string, string) (uint, error) - DestroyKey(uint) error - CreateDroplet(string, string, string, string, uint, bool) (uint, error) - DestroyDroplet(uint) error - PowerOffDroplet(uint) error - ShutdownDroplet(uint) error - CreateSnapshot(uint, string) error - Images() ([]Image, error) - DestroyImage(uint) error - DropletStatus(uint) (string, string, error) - Image(string) (Image, error) - Regions() ([]Region, error) - Region(string) (Region, error) - Sizes() ([]Size, error) - Size(string) (Size, error) -} diff --git a/builder/digitalocean/api_v1.go b/builder/digitalocean/api_v1.go deleted file mode 100644 index 23746d11f..000000000 --- a/builder/digitalocean/api_v1.go +++ /dev/null @@ -1,382 +0,0 @@ -// All of the methods used to communicate with the digital_ocean API -// are here. Their API is on a path to V2, so just plain JSON is used -// in place of a proper client library for now. 
- -package digitalocean - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/mitchellh/mapstructure" -) - -type DigitalOceanClientV1 struct { - // The http client for communicating - client *http.Client - - // Credentials - ClientID string - APIKey string - // The base URL of the API - APIURL string -} - -// Creates a new client for communicating with DO -func DigitalOceanClientNewV1(client string, key string, url string) *DigitalOceanClientV1 { - c := &DigitalOceanClientV1{ - client: &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - }, - }, - APIURL: url, - ClientID: client, - APIKey: key, - } - return c -} - -// Creates an SSH Key and returns it's id -func (d DigitalOceanClientV1) CreateKey(name string, pub string) (uint, error) { - params := url.Values{} - params.Set("name", name) - params.Set("ssh_pub_key", pub) - - body, err := NewRequestV1(d, "ssh_keys/new", params) - if err != nil { - return 0, err - } - - // Read the SSH key's ID we just created - key := body["ssh_key"].(map[string]interface{}) - keyId := key["id"].(float64) - return uint(keyId), nil -} - -// Destroys an SSH key -func (d DigitalOceanClientV1) DestroyKey(id uint) error { - path := fmt.Sprintf("ssh_keys/%v/destroy", id) - _, err := NewRequestV1(d, path, url.Values{}) - return err -} - -// Creates a droplet and returns it's id -func (d DigitalOceanClientV1) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) { - params := url.Values{} - params.Set("name", name) - - found_size, err := d.Size(size) - if err != nil { - return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err) - } - - found_image, err := d.Image(image) - if err != nil { - return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err) - } - - found_region, err := d.Region(region) - if err != nil { - return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err) - } - - params.Set("size_slug", found_size.Slug) - params.Set("image_slug", found_image.Slug) - params.Set("region_slug", found_region.Slug) - params.Set("ssh_key_ids", fmt.Sprintf("%v", keyId)) - params.Set("private_networking", fmt.Sprintf("%v", privateNetworking)) - - body, err := NewRequestV1(d, "droplets/new", params) - if err != nil { - return 0, err - } - - // Read the Droplets ID - droplet := body["droplet"].(map[string]interface{}) - dropletId := droplet["id"].(float64) - return uint(dropletId), err -} - -// Destroys a droplet -func (d DigitalOceanClientV1) DestroyDroplet(id uint) error { - path := fmt.Sprintf("droplets/%v/destroy", id) - _, err := NewRequestV1(d, path, url.Values{}) - return err -} - -// Powers off a droplet -func (d DigitalOceanClientV1) PowerOffDroplet(id uint) error { - path := fmt.Sprintf("droplets/%v/power_off", id) - _, err := NewRequestV1(d, path, url.Values{}) - return err -} - -// Shutsdown a droplet. This is a "soft" shutdown. -func (d DigitalOceanClientV1) ShutdownDroplet(id uint) error { - path := fmt.Sprintf("droplets/%v/shutdown", id) - _, err := NewRequestV1(d, path, url.Values{}) - return err -} - -// Creates a snaphot of a droplet by it's ID -func (d DigitalOceanClientV1) CreateSnapshot(id uint, name string) error { - path := fmt.Sprintf("droplets/%v/snapshot", id) - - params := url.Values{} - params.Set("name", name) - - _, err := NewRequestV1(d, path, params) - - return err -} - -// Returns all available images. 
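// (Each v1 read helper from here down funnels its response through
// NewRequestV1, which returns a generic map[string]interface{};
// mapstructure.Decode then maps that payload onto the typed ImagesResp,
// RegionsResp and SizesResp structs.)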
-func (d DigitalOceanClientV1) Images() ([]Image, error) { - resp, err := NewRequestV1(d, "images", url.Values{}) - if err != nil { - return nil, err - } - - var result ImagesResp - if err := mapstructure.Decode(resp, &result); err != nil { - return nil, err - } - - return result.Images, nil -} - -// Destroys an image by its ID. -func (d DigitalOceanClientV1) DestroyImage(id uint) error { - path := fmt.Sprintf("images/%d/destroy", id) - _, err := NewRequestV1(d, path, url.Values{}) - return err -} - -// Returns DO's string representation of status "off" "new" "active" etc. -func (d DigitalOceanClientV1) DropletStatus(id uint) (string, string, error) { - path := fmt.Sprintf("droplets/%v", id) - - body, err := NewRequestV1(d, path, url.Values{}) - if err != nil { - return "", "", err - } - - var ip string - - // Read the droplet's "status" - droplet := body["droplet"].(map[string]interface{}) - status := droplet["status"].(string) - - if droplet["ip_address"] != nil { - ip = droplet["ip_address"].(string) - } - - return ip, status, err -} - -// Sends an api request and returns a generic map[string]interface of -// the response. -func NewRequestV1(d DigitalOceanClientV1, path string, params url.Values) (map[string]interface{}, error) { - client := d.client - - // Add the authentication parameters - params.Set("client_id", d.ClientID) - params.Set("api_key", d.APIKey) - - url := fmt.Sprintf("%s/%s?%s", d.APIURL, path, params.Encode()) - - // Do some basic scrubbing so sensitive information doesn't appear in logs - scrubbedUrl := strings.Replace(url, d.ClientID, "CLIENT_ID", -1) - scrubbedUrl = strings.Replace(scrubbedUrl, d.APIKey, "API_KEY", -1) - log.Printf("sending new request to digitalocean: %s", scrubbedUrl) - - var lastErr error - for attempts := 1; attempts < 10; attempts++ { - resp, err := client.Get(url) - if err != nil { - return nil, err - } - - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, err - } - - log.Printf("response from digitalocean: %s", body) - - var decodedResponse map[string]interface{} - err = json.Unmarshal(body, &decodedResponse) - if err != nil { - err = errors.New(fmt.Sprintf("Failed to decode JSON response (HTTP %v) from DigitalOcean: %s", - resp.StatusCode, body)) - return decodedResponse, err - } - - // Check for errors sent by digitalocean - status := decodedResponse["status"].(string) - if status == "OK" { - return decodedResponse, nil - } - - if status == "ERROR" { - statusRaw, ok := decodedResponse["error_message"] - if ok { - status = statusRaw.(string) - } else { - status = fmt.Sprintf( - "Unknown error. Full response body: %s", body) - } - } - - lastErr = errors.New(fmt.Sprintf("Received error from DigitalOcean (%d): %s", - resp.StatusCode, status)) - log.Println(lastErr) - if strings.Contains(status, "a pending event") { - // Retry, DigitalOcean sends these dumb "pending event" - // errors all the time. - time.Sleep(5 * time.Second) - continue - } - - // Some other kind of error. Just return. 
- return decodedResponse, lastErr - } - - return nil, lastErr -} - -func (d DigitalOceanClientV1) Image(slug_or_name_or_id string) (Image, error) { - images, err := d.Images() - if err != nil { - return Image{}, err - } - - for _, image := range images { - if strings.EqualFold(image.Slug, slug_or_name_or_id) { - return image, nil - } - } - - for _, image := range images { - if strings.EqualFold(image.Name, slug_or_name_or_id) { - return image, nil - } - } - - for _, image := range images { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if image.Id == uint(id) { - return image, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id)) - - return Image{}, err -} - -// Returns all available regions. -func (d DigitalOceanClientV1) Regions() ([]Region, error) { - resp, err := NewRequestV1(d, "regions", url.Values{}) - if err != nil { - return nil, err - } - - var result RegionsResp - if err := mapstructure.Decode(resp, &result); err != nil { - return nil, err - } - - return result.Regions, nil -} - -func (d DigitalOceanClientV1) Region(slug_or_name_or_id string) (Region, error) { - regions, err := d.Regions() - if err != nil { - return Region{}, err - } - - for _, region := range regions { - if strings.EqualFold(region.Slug, slug_or_name_or_id) { - return region, nil - } - } - - for _, region := range regions { - if strings.EqualFold(region.Name, slug_or_name_or_id) { - return region, nil - } - } - - for _, region := range regions { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if region.Id == uint(id) { - return region, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id)) - - return Region{}, err -} - -// Returns all available sizes. -func (d DigitalOceanClientV1) Sizes() ([]Size, error) { - resp, err := NewRequestV1(d, "sizes", url.Values{}) - if err != nil { - return nil, err - } - - var result SizesResp - if err := mapstructure.Decode(resp, &result); err != nil { - return nil, err - } - - return result.Sizes, nil -} - -func (d DigitalOceanClientV1) Size(slug_or_name_or_id string) (Size, error) { - sizes, err := d.Sizes() - if err != nil { - return Size{}, err - } - - for _, size := range sizes { - if strings.EqualFold(size.Slug, slug_or_name_or_id) { - return size, nil - } - } - - for _, size := range sizes { - if strings.EqualFold(size.Name, slug_or_name_or_id) { - return size, nil - } - } - - for _, size := range sizes { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if size.Id == uint(id) { - return size, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id)) - - return Size{}, err -} diff --git a/builder/digitalocean/api_v2.go b/builder/digitalocean/api_v2.go deleted file mode 100644 index 46454a9f8..000000000 --- a/builder/digitalocean/api_v2.go +++ /dev/null @@ -1,462 +0,0 @@ -// are here. Their API is on a path to V2, so just plain JSON is used -// in place of a proper client library for now. 
- -package digitalocean - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "strconv" - "strings" -) - -type DigitalOceanClientV2 struct { - // The http client for communicating - client *http.Client - - // Credentials - APIToken string - - // The base URL of the API - APIURL string -} - -// Creates a new client for communicating with DO -func DigitalOceanClientNewV2(token string, url string) *DigitalOceanClientV2 { - c := &DigitalOceanClientV2{ - client: &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - }, - }, - APIURL: url, - APIToken: token, - } - return c -} - -// Creates an SSH Key and returns it's id -func (d DigitalOceanClientV2) CreateKey(name string, pub string) (uint, error) { - type KeyReq struct { - Name string `json:"name"` - PublicKey string `json:"public_key"` - } - type KeyRes struct { - SSHKey struct { - Id uint - Name string - Fingerprint string - PublicKey string `json:"public_key"` - } `json:"ssh_key"` - } - req := &KeyReq{Name: name, PublicKey: pub} - res := KeyRes{} - err := NewRequestV2(d, "v2/account/keys", "POST", req, &res) - if err != nil { - return 0, err - } - - return res.SSHKey.Id, err -} - -// Destroys an SSH key -func (d DigitalOceanClientV2) DestroyKey(id uint) error { - path := fmt.Sprintf("v2/account/keys/%v", id) - return NewRequestV2(d, path, "DELETE", nil, nil) -} - -// Creates a droplet and returns it's id -func (d DigitalOceanClientV2) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) { - type DropletReq struct { - Name string `json:"name"` - Region string `json:"region"` - Size string `json:"size"` - Image string `json:"image"` - SSHKeys []string `json:"ssh_keys,omitempty"` - Backups bool `json:"backups,omitempty"` - IPv6 bool `json:"ipv6,omitempty"` - PrivateNetworking bool `json:"private_networking,omitempty"` - } - type DropletRes struct { - Droplet struct { - Id uint - Name string - Memory uint - VCPUS uint `json:"vcpus"` - Disk uint - Region Region - Image Image - Size Size - Locked bool - CreateAt string `json:"created_at"` - Status string - Networks struct { - V4 []struct { - IPAddr string `json:"ip_address"` - Netmask string - Gateway string - Type string - } `json:"v4,omitempty"` - V6 []struct { - IPAddr string `json:"ip_address"` - CIDR uint `json:"cidr"` - Gateway string - Type string - } `json:"v6,omitempty"` - } - Kernel struct { - Id uint - Name string - Version string - } - BackupIds []uint - SnapshotIds []uint - ActionIds []uint - Features []string `json:"features,omitempty"` - } - } - req := &DropletReq{Name: name} - res := DropletRes{} - - found_size, err := d.Size(size) - if err != nil { - return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err) - } - - found_image, err := d.Image(image) - if err != nil { - return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err) - } - - found_region, err := d.Region(region) - if err != nil { - return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err) - } - - if found_image.Slug == "" { - req.Image = strconv.Itoa(int(found_image.Id)) - } else { - req.Image = found_image.Slug - } - - req.Size = found_size.Slug - req.Region = found_region.Slug - req.SSHKeys = []string{fmt.Sprintf("%v", keyId)} - req.PrivateNetworking = privateNetworking - - err = NewRequestV2(d, "v2/droplets", "POST", req, &res) - if err != nil { - return 0, err - } - - return res.Droplet.Id, err -} - -// Destroys a droplet 
-func (d DigitalOceanClientV2) DestroyDroplet(id uint) error { - path := fmt.Sprintf("v2/droplets/%v", id) - return NewRequestV2(d, path, "DELETE", nil, nil) -} - -// Powers off a droplet -func (d DigitalOceanClientV2) PowerOffDroplet(id uint) error { - type ActionReq struct { - Type string `json:"type"` - } - type ActionRes struct { - } - req := &ActionReq{Type: "power_off"} - path := fmt.Sprintf("v2/droplets/%v/actions", id) - return NewRequestV2(d, path, "POST", req, nil) -} - -// Shutsdown a droplet. This is a "soft" shutdown. -func (d DigitalOceanClientV2) ShutdownDroplet(id uint) error { - type ActionReq struct { - Type string `json:"type"` - } - type ActionRes struct { - } - req := &ActionReq{Type: "shutdown"} - - path := fmt.Sprintf("v2/droplets/%v/actions", id) - return NewRequestV2(d, path, "POST", req, nil) -} - -// Creates a snaphot of a droplet by it's ID -func (d DigitalOceanClientV2) CreateSnapshot(id uint, name string) error { - type ActionReq struct { - Type string `json:"type"` - Name string `json:"name"` - } - type ActionRes struct { - } - req := &ActionReq{Type: "snapshot", Name: name} - path := fmt.Sprintf("v2/droplets/%v/actions", id) - return NewRequestV2(d, path, "POST", req, nil) -} - -// Returns all available images. -func (d DigitalOceanClientV2) Images() ([]Image, error) { - res := ImagesResp{} - - err := NewRequestV2(d, "v2/images?per_page=200", "GET", nil, &res) - if err != nil { - return nil, err - } - - return res.Images, nil -} - -// Destroys an image by its ID. -func (d DigitalOceanClientV2) DestroyImage(id uint) error { - path := fmt.Sprintf("v2/images/%d", id) - return NewRequestV2(d, path, "DELETE", nil, nil) -} - -// Returns DO's string representation of status "off" "new" "active" etc. -func (d DigitalOceanClientV2) DropletStatus(id uint) (string, string, error) { - path := fmt.Sprintf("v2/droplets/%v", id) - type DropletRes struct { - Droplet struct { - Id uint - Name string - Memory uint - VCPUS uint `json:"vcpus"` - Disk uint - Region Region - Image Image - Size Size - Locked bool - CreateAt string `json:"created_at"` - Status string - Networks struct { - V4 []struct { - IPAddr string `json:"ip_address"` - Netmask string - Gateway string - Type string - } `json:"v4,omitempty"` - V6 []struct { - IPAddr string `json:"ip_address"` - CIDR uint `json:"cidr"` - Gateway string - Type string - } `json:"v6,omitempty"` - } - Kernel struct { - Id uint - Name string - Version string - } - BackupIds []uint - SnapshotIds []uint - ActionIds []uint - Features []string `json:"features,omitempty"` - } - } - res := DropletRes{} - err := NewRequestV2(d, path, "GET", nil, &res) - if err != nil { - return "", "", err - } - var ip string - - for _, n := range res.Droplet.Networks.V4 { - if n.Type == "public" { - ip = n.IPAddr - } - } - - return ip, res.Droplet.Status, err -} - -// Sends an api request and returns a generic map[string]interface of -// the response. 
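The helper below is the core of the v2 client being deleted: JSON-encode the request body, send it with a Bearer token, then decode the JSON reply. A self-contained sketch of that pattern using only the standard library; the URL, droplet id and token are hypothetical:

package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// JSON-encode a hypothetical droplet action, as CreateSnapshot and
	// PowerOffDroplet do via their ActionReq structs.
	payload, err := json.Marshal(map[string]string{"type": "power_off"})
	if err != nil {
		log.Fatal(err)
	}
	req, err := http.NewRequest("POST",
		"https://api.digitalocean.com/v2/droplets/123/actions",
		bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Bearer "+"hypothetical-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode the JSON reply into a generic map, mirroring how the old
	// helpers decoded into their response structs.
	var res map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		log.Fatal(err)
	}
	log.Printf("HTTP %d: %v", resp.StatusCode, res)
}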
-func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interface{}, res interface{}) error { - var err error - var request *http.Request - - client := d.client - - buf := new(bytes.Buffer) - // Add the authentication parameters - url := fmt.Sprintf("%s/%s", d.APIURL, path) - if req != nil { - enc := json.NewEncoder(buf) - enc.Encode(req) - defer buf.Reset() - request, err = http.NewRequest(method, url, buf) - request.Header.Add("Content-Type", "application/json") - } else { - request, err = http.NewRequest(method, url, nil) - } - if err != nil { - return err - } - - // Add the authentication parameters - request.Header.Add("Authorization", "Bearer "+d.APIToken) - if buf != nil { - log.Printf("sending new request to digitalocean: %s buffer: %s", url, buf) - } else { - log.Printf("sending new request to digitalocean: %s", url) - } - resp, err := client.Do(request) - if err != nil { - return err - } - - if method == "DELETE" && resp.StatusCode == 204 { - if resp.Body != nil { - resp.Body.Close() - } - return nil - } - - if resp.Body == nil { - return errors.New("Request returned empty body") - } - - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - log.Printf("response from digitalocean: %s", body) - - err = json.Unmarshal(body, &res) - if err != nil { - return errors.New(fmt.Sprintf("Failed to decode JSON response %s (HTTP %v) from DigitalOcean: %s", err.Error(), - resp.StatusCode, body)) - } - switch resp.StatusCode { - case 403, 401, 429, 422, 404, 503, 500: - return errors.New(fmt.Sprintf("digitalocean request error: %+v", res)) - } - return nil -} - -func (d DigitalOceanClientV2) Image(slug_or_name_or_id string) (Image, error) { - images, err := d.Images() - if err != nil { - return Image{}, err - } - - for _, image := range images { - if strings.EqualFold(image.Slug, slug_or_name_or_id) { - return image, nil - } - } - - for _, image := range images { - if strings.EqualFold(image.Name, slug_or_name_or_id) { - return image, nil - } - } - - for _, image := range images { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if image.Id == uint(id) { - return image, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id)) - - return Image{}, err -} - -// Returns all available regions. -func (d DigitalOceanClientV2) Regions() ([]Region, error) { - res := RegionsResp{} - err := NewRequestV2(d, "v2/regions?per_page=200", "GET", nil, &res) - if err != nil { - return nil, err - } - - return res.Regions, nil -} - -func (d DigitalOceanClientV2) Region(slug_or_name_or_id string) (Region, error) { - regions, err := d.Regions() - if err != nil { - return Region{}, err - } - - for _, region := range regions { - if strings.EqualFold(region.Slug, slug_or_name_or_id) { - return region, nil - } - } - - for _, region := range regions { - if strings.EqualFold(region.Name, slug_or_name_or_id) { - return region, nil - } - } - - for _, region := range regions { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if region.Id == uint(id) { - return region, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id)) - - return Region{}, err -} - -// Returns all available sizes. 
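// (The v2 listing calls cap results with ?per_page=200 rather than walking
// pages; the slug/name/id lookups in this file then scan the returned slice
// client-side with strings.EqualFold and strconv.Atoi.)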
-func (d DigitalOceanClientV2) Sizes() ([]Size, error) { - res := SizesResp{} - err := NewRequestV2(d, "v2/sizes?per_page=200", "GET", nil, &res) - if err != nil { - return nil, err - } - - return res.Sizes, nil -} - -func (d DigitalOceanClientV2) Size(slug_or_name_or_id string) (Size, error) { - sizes, err := d.Sizes() - if err != nil { - return Size{}, err - } - - for _, size := range sizes { - if strings.EqualFold(size.Slug, slug_or_name_or_id) { - return size, nil - } - } - - for _, size := range sizes { - if strings.EqualFold(size.Name, slug_or_name_or_id) { - return size, nil - } - } - - for _, size := range sizes { - id, err := strconv.Atoi(slug_or_name_or_id) - if err == nil { - if size.Id == uint(id) { - return size, nil - } - } - } - - err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id)) - - return Size{}, err -} diff --git a/builder/digitalocean/artifact.go b/builder/digitalocean/artifact.go index d1d878193..3b6a05e53 100644 --- a/builder/digitalocean/artifact.go +++ b/builder/digitalocean/artifact.go @@ -4,6 +4,8 @@ import ( "fmt" "log" "strconv" + + "github.com/digitalocean/godo" ) type Artifact struct { @@ -11,13 +13,13 @@ type Artifact struct { snapshotName string // The ID of the image - snapshotId uint + snapshotId int // The name of the region regionName string // The client for making API calls - client DigitalOceanClient + client *godo.Client } func (*Artifact) BuilderId() string { @@ -43,5 +45,6 @@ func (a *Artifact) State(name string) interface{} { func (a *Artifact) Destroy() error { log.Printf("Destroying image: %d (%s)", a.snapshotId, a.snapshotName) - return a.client.DestroyImage(a.snapshotId) + _, err := a.client.Images.Delete(a.snapshotId) + return err } diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index 3292bea10..3ba7074a2 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -4,18 +4,14 @@ package digitalocean import ( - "errors" - "fmt" "log" - "os" "time" + "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" - "github.com/mitchellh/packer/common/uuid" - "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/template/interpolate" + "golang.org/x/oauth2" ) // see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key] @@ -33,179 +29,25 @@ const DefaultSize = "512mb" // The unique id for the builder const BuilderId = "pearkes.digitalocean" -// Configuration tells the builder the credentials -// to use while communicating with DO and describes the image -// you are creating -type Config struct { - common.PackerConfig `mapstructure:",squash"` - - ClientID string `mapstructure:"client_id"` - APIKey string `mapstructure:"api_key"` - APIURL string `mapstructure:"api_url"` - APIToken string `mapstructure:"api_token"` - RegionID uint `mapstructure:"region_id"` - SizeID uint `mapstructure:"size_id"` - ImageID uint `mapstructure:"image_id"` - - Region string `mapstructure:"region"` - Size string `mapstructure:"size"` - Image string `mapstructure:"image"` - - PrivateNetworking bool `mapstructure:"private_networking"` - SnapshotName string `mapstructure:"snapshot_name"` - DropletName string `mapstructure:"droplet_name"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort uint `mapstructure:"ssh_port"` - - RawSSHTimeout string `mapstructure:"ssh_timeout"` - RawStateTimeout string `mapstructure:"state_timeout"` - - // These are unexported since they're set 
by other fields - // being set. - sshTimeout time.Duration - stateTimeout time.Duration - - ctx *interpolate.Context -} - type Builder struct { config Config runner multistep.Runner } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, - }, raws...) - if err != nil { - return nil, err + c, warnings, errs := NewConfig(raws...) + if errs != nil { + return warnings, errs } + b.config = *c - // Optional configuration with defaults - if b.config.APIKey == "" { - // Default to environment variable for api_key, if it exists - b.config.APIKey = os.Getenv("DIGITALOCEAN_API_KEY") - } - - if b.config.ClientID == "" { - // Default to environment variable for client_id, if it exists - b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID") - } - - if b.config.APIURL == "" { - // Default to environment variable for api_url, if it exists - b.config.APIURL = os.Getenv("DIGITALOCEAN_API_URL") - } - - if b.config.APIToken == "" { - // Default to environment variable for api_token, if it exists - b.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN") - } - - if b.config.Region == "" { - if b.config.RegionID != 0 { - b.config.Region = fmt.Sprintf("%v", b.config.RegionID) - } else { - b.config.Region = DefaultRegion - } - } - - if b.config.Size == "" { - if b.config.SizeID != 0 { - b.config.Size = fmt.Sprintf("%v", b.config.SizeID) - } else { - b.config.Size = DefaultSize - } - } - - if b.config.Image == "" { - if b.config.ImageID != 0 { - b.config.Image = fmt.Sprintf("%v", b.config.ImageID) - } else { - b.config.Image = DefaultImage - } - } - - if b.config.SnapshotName == "" { - // Default to packer-{{ unix timestamp (utc) }} - b.config.SnapshotName = "packer-{{timestamp}}" - } - - if b.config.DropletName == "" { - // Default to packer-[time-ordered-uuid] - b.config.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) - } - - if b.config.SSHUsername == "" { - // Default to "root". You can override this if your - // SourceImage has a different user account then the DO default - b.config.SSHUsername = "root" - } - - if b.config.SSHPort == 0 { - // Default to port 22 per DO default - b.config.SSHPort = 22 - } - - if b.config.RawSSHTimeout == "" { - // Default to 1 minute timeouts - b.config.RawSSHTimeout = "1m" - } - - if b.config.RawStateTimeout == "" { - // Default to 6 minute timeouts waiting for - // desired state. 
i.e waiting for droplet to become active - b.config.RawStateTimeout = "6m" - } - - var errs *packer.MultiError - if b.config.APIToken == "" { - // Required configurations that will display errors if not set - if b.config.ClientID == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("a client_id for v1 auth or api_token for v2 auth must be specified")) - } - - if b.config.APIKey == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("a api_key for v1 auth or api_token for v2 auth must be specified")) - } - } - - if b.config.APIURL == "" { - b.config.APIURL = "https://api.digitalocean.com" - } - - sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - b.config.sshTimeout = sshTimeout - - stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) - } - b.config.stateTimeout = stateTimeout - - if errs != nil && len(errs.Errors) > 0 { - return nil, errs - } - - common.ScrubConfig(b.config, b.config.ClientID, b.config.APIKey) return nil, nil } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - var client DigitalOceanClient - // Initialize the DO API client - if b.config.APIToken == "" { - client = DigitalOceanClientNewV1(b.config.ClientID, b.config.APIKey, b.config.APIURL) - } else { - client = DigitalOceanClientNewV2(b.config.APIToken, b.config.APIURL) - } + client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, &apiTokenSource{ + AccessToken: b.config.APIToken, + })) // Set up the state state := new(multistep.BasicStateBag) @@ -252,26 +94,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, nil } - sregion := state.Get("region") - - var region string - - if sregion != nil { - region = sregion.(string) - } else { - region = fmt.Sprintf("%v", state.Get("region_id").(uint)) - } - - found_region, err := client.Region(region) - - if err != nil { - return nil, err - } - artifact := &Artifact{ snapshotName: state.Get("snapshot_name").(string), - snapshotId: state.Get("snapshot_image_id").(uint), - regionName: found_region.Name, + snapshotId: state.Get("snapshot_image_id").(int), + regionName: state.Get("region").(string), client: client, } diff --git a/builder/digitalocean/builder_acc_test.go b/builder/digitalocean/builder_acc_test.go new file mode 100644 index 000000000..20e56b924 --- /dev/null +++ b/builder/digitalocean/builder_acc_test.go @@ -0,0 +1,30 @@ +package digitalocean + +import ( + "os" + "testing" + + builderT "github.com/mitchellh/packer/helper/builder/testing" +) + +func TestBuilderAcc_basic(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: testBuilderAccBasic, + }) +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("DIGITALOCEAN_API_TOKEN"); v == "" { + t.Fatal("DIGITALOCEAN_API_TOKEN must be set for acceptance tests") + } +} + +const testBuilderAccBasic = ` +{ + "builders": [{ + "type": "test" + }] +} +` diff --git a/builder/digitalocean/builder_test.go b/builder/digitalocean/builder_test.go index bd3bb1d21..8985aae2f 100644 --- a/builder/digitalocean/builder_test.go +++ b/builder/digitalocean/builder_test.go @@ -43,90 +43,6 @@ func TestBuilder_Prepare_BadType(t *testing.T) { } } -func TestBuilderPrepare_APIKey(t *testing.T) { - var b 
Builder - config := testConfig() - - // Test good - config["api_key"] = "foo" - warnings, err := b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.APIKey != "foo" { - t.Errorf("access key invalid: %s", b.config.APIKey) - } - - // Test bad - delete(config, "api_key") - b = Builder{} - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err == nil { - t.Fatal("should have error") - } - - // Test env variable - delete(config, "api_key") - os.Setenv("DIGITALOCEAN_API_KEY", "foo") - defer os.Setenv("DIGITALOCEAN_API_KEY", "") - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } -} - -func TestBuilderPrepare_ClientID(t *testing.T) { - var b Builder - config := testConfig() - - // Test good - config["client_id"] = "foo" - warnings, err := b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ClientID != "foo" { - t.Errorf("invalid: %s", b.config.ClientID) - } - - // Test bad - delete(config, "client_id") - b = Builder{} - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err == nil { - t.Fatal("should have error") - } - - // Test env variable - delete(config, "client_id") - os.Setenv("DIGITALOCEAN_CLIENT_ID", "foo") - defer os.Setenv("DIGITALOCEAN_CLIENT_ID", "") - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } -} - func TestBuilderPrepare_InvalidKey(t *testing.T) { var b Builder config := testConfig() diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go new file mode 100644 index 000000000..0be49f08e --- /dev/null +++ b/builder/digitalocean/config.go @@ -0,0 +1,140 @@ +package digitalocean + +import ( + "errors" + "fmt" + "os" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" + //"github.com/digitalocean/godo" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + APIToken string `mapstructure:"api_token"` + + // OLD STUFF + + Region string `mapstructure:"region"` + Size string `mapstructure:"size"` + Image string `mapstructure:"image"` + + PrivateNetworking bool `mapstructure:"private_networking"` + SnapshotName string `mapstructure:"snapshot_name"` + DropletName string `mapstructure:"droplet_name"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPort uint `mapstructure:"ssh_port"` + + RawSSHTimeout string `mapstructure:"ssh_timeout"` + RawStateTimeout string `mapstructure:"state_timeout"` + + // These are unexported since they're set by other fields + // being set. + sshTimeout time.Duration + stateTimeout time.Duration + + ctx *interpolate.Context +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + var c Config + + var md mapstructure.Metadata + err := config.Decode(&c, &config.DecodeOpts{ + Metadata: &md, + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "run_command", + }, + }, + }, raws...) 
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Defaults
+	if c.APIToken == "" {
+		// Default to environment variable for api_token, if it exists
+		c.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
+	}
+
+	if c.Region == "" {
+		c.Region = DefaultRegion
+	}
+
+	if c.Size == "" {
+		c.Size = DefaultSize
+	}
+
+	if c.Image == "" {
+		c.Image = DefaultImage
+	}
+
+	if c.SnapshotName == "" {
+		// Default to packer-{{ unix timestamp (utc) }}
+		c.SnapshotName = "packer-{{timestamp}}"
+	}
+
+	if c.DropletName == "" {
+		// Default to packer-[time-ordered-uuid]
+		c.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
+	}
+
+	if c.SSHUsername == "" {
+		// Default to "root". You can override this if your
+		// SourceImage has a different user account than the DO default
+		c.SSHUsername = "root"
+	}
+
+	if c.SSHPort == 0 {
+		// Default to port 22 per DO default
+		c.SSHPort = 22
+	}
+
+	if c.RawSSHTimeout == "" {
+		// Default to 1 minute timeouts
+		c.RawSSHTimeout = "1m"
+	}
+
+	if c.RawStateTimeout == "" {
+		// Default to 6 minute timeouts waiting for
+		// desired state, i.e. waiting for droplet to become active
+		c.RawStateTimeout = "6m"
+	}
+
+	var errs *packer.MultiError
+	if c.APIToken == "" {
+		// Required configurations that will display errors if not set
+		errs = packer.MultiErrorAppend(
+			errs, errors.New("api_token for auth must be specified"))
+	}
+
+	sshTimeout, err := time.ParseDuration(c.RawSSHTimeout)
+	if err != nil {
+		errs = packer.MultiErrorAppend(
+			errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err))
+	}
+	c.sshTimeout = sshTimeout
+
+	stateTimeout, err := time.ParseDuration(c.RawStateTimeout)
+	if err != nil {
+		errs = packer.MultiErrorAppend(
+			errs, fmt.Errorf("Failed parsing state_timeout: %s", err))
+	}
+	c.stateTimeout = stateTimeout
+
+	if errs != nil && len(errs.Errors) > 0 {
+		return nil, nil, errs
+	}
+
+	common.ScrubConfig(c, c.APIToken)
+	return &c, nil, nil
+}
diff --git a/builder/digitalocean/step_create_droplet.go b/builder/digitalocean/step_create_droplet.go
index afb3e5814..40ac8f0e9 100644
--- a/builder/digitalocean/step_create_droplet.go
+++ b/builder/digitalocean/step_create_droplet.go
@@ -3,25 +3,35 @@ package digitalocean
 
 import (
 	"fmt"
 
+	"github.com/digitalocean/godo"
 	"github.com/mitchellh/multistep"
 	"github.com/mitchellh/packer/packer"
 )
 
 type stepCreateDroplet struct {
-	dropletId uint
+	dropletId int
 }
 
 func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
-	client := state.Get("client").(DigitalOceanClient)
+	client := state.Get("client").(*godo.Client)
 	ui := state.Get("ui").(packer.Ui)
 	c := state.Get("config").(Config)
-	sshKeyId := state.Get("ssh_key_id").(uint)
-
-	ui.Say("Creating droplet...")
+	sshKeyId := state.Get("ssh_key_id").(int)
 
 	// Create the droplet based on configuration
-	dropletId, err := client.CreateDroplet(c.DropletName, c.Size, c.Image, c.Region, sshKeyId, c.PrivateNetworking)
-
+	ui.Say("Creating droplet...")
+	droplet, _, err := client.Droplets.Create(&godo.DropletCreateRequest{
+		Name:   c.DropletName,
+		Region: c.Region,
+		Size:   c.Size,
+		Image: godo.DropletCreateImage{
+			Slug: c.Image,
+		},
+		SSHKeys: []godo.DropletCreateSSHKey{
+			godo.DropletCreateSSHKey{ID: int(sshKeyId)},
+		},
+		PrivateNetworking: c.PrivateNetworking,
+	})
 	if err != nil {
 		err := fmt.Errorf("Error creating droplet: %s", err)
 		state.Put("error", err)
@@ -30,10 +40,10 @@ func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
 	}
 
 	// We use this in cleanup
-	s.dropletId = dropletId
+	s.dropletId = droplet.ID
 
 	// Store the droplet id for later
-	state.Put("droplet_id", dropletId)
+	state.Put("droplet_id", droplet.ID)
 
 	return multistep.ActionContinue
 }
@@ -44,19 +54,14 @@ func (s *stepCreateDroplet) Cleanup(state multistep.StateBag) {
 		return
 	}
 
-	client := state.Get("client").(DigitalOceanClient)
+	client := state.Get("client").(*godo.Client)
 	ui := state.Get("ui").(packer.Ui)
-	c := state.Get("config").(Config)
 
 	// Destroy the droplet we just created
 	ui.Say("Destroying droplet...")
-
-	err := client.DestroyDroplet(s.dropletId)
+	_, err := client.Droplets.Delete(s.dropletId)
 	if err != nil {
-		curlstr := fmt.Sprintf("curl '%v/droplets/%v/destroy?client_id=%v&api_key=%v'",
-			c.APIURL, s.dropletId, c.ClientID, c.APIKey)
-
 		ui.Error(fmt.Sprintf(
-			"Error destroying droplet. Please destroy it manually: %v", curlstr))
+			"Error destroying droplet. Please destroy it manually: %s", err))
 	}
 }
diff --git a/builder/digitalocean/step_create_ssh_key.go b/builder/digitalocean/step_create_ssh_key.go
index db1ad9c16..fa0940c23 100644
--- a/builder/digitalocean/step_create_ssh_key.go
+++ b/builder/digitalocean/step_create_ssh_key.go
@@ -9,17 +9,18 @@ import (
 	"log"
 
 	"code.google.com/p/gosshold/ssh"
+	"github.com/digitalocean/godo"
 	"github.com/mitchellh/multistep"
 	"github.com/mitchellh/packer/common/uuid"
 	"github.com/mitchellh/packer/packer"
 )
 
 type stepCreateSSHKey struct {
-	keyId uint
+	keyId int
 }
 
 func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
-	client := state.Get("client").(DigitalOceanClient)
+	client := state.Get("client").(*godo.Client)
 	ui := state.Get("ui").(packer.Ui)
 
 	ui.Say("Creating temporary ssh key for droplet...")
@@ -46,7 +47,10 @@ func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
 	name := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
 
 	// Create the key!
-	keyId, err := client.CreateKey(name, pub_sshformat)
+	key, _, err := client.Keys.Create(&godo.KeyCreateRequest{
+		Name:      name,
+		PublicKey: pub_sshformat,
+	})
 	if err != nil {
 		err := fmt.Errorf("Error creating temporary SSH key: %s", err)
 		state.Put("error", err)
@@ -55,12 +59,12 @@ func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
 	}
 
 	// We use this to check cleanup
-	s.keyId = keyId
+	s.keyId = key.ID
 
 	log.Printf("temporary ssh key name: %s", name)
 
 	// Remember some state for the future
-	state.Put("ssh_key_id", keyId)
+	state.Put("ssh_key_id", key.ID)
 
 	return multistep.ActionContinue
 }
@@ -71,18 +75,14 @@ func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) {
 		return
 	}
 
-	client := state.Get("client").(DigitalOceanClient)
+	client := state.Get("client").(*godo.Client)
 	ui := state.Get("ui").(packer.Ui)
-	c := state.Get("config").(Config)
 
 	ui.Say("Deleting temporary ssh key...")
-	err := client.DestroyKey(s.keyId)
-
-	curlstr := fmt.Sprintf("curl -H 'Authorization: Bearer #TOKEN#' -X DELETE '%v/v2/account/keys/%v'", c.APIURL, s.keyId)
-
+	_, err := client.Keys.DeleteByID(s.keyId)
	if err != nil {
-		log.Printf("Error cleaning up ssh key: %v", err.Error())
+		log.Printf("Error cleaning up ssh key: %s", err)
 		ui.Error(fmt.Sprintf(
-			"Error cleaning up ssh key. Please delete the key manually: %v", curlstr))
+			"Error cleaning up ssh key. 
Please delete the key manually: %s", err)) } } diff --git a/builder/digitalocean/step_droplet_info.go b/builder/digitalocean/step_droplet_info.go index 8e9b69927..5fbcb7141 100644 --- a/builder/digitalocean/step_droplet_info.go +++ b/builder/digitalocean/step_droplet_info.go @@ -3,6 +3,7 @@ package digitalocean import ( "fmt" + "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -10,10 +11,10 @@ import ( type stepDropletInfo struct{} func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(DigitalOceanClient) + client := state.Get("client").(*godo.Client) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) - dropletId := state.Get("droplet_id").(uint) + dropletId := state.Get("droplet_id").(int) ui.Say("Waiting for droplet to become active...") @@ -26,16 +27,25 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { } // Set the IP on the state for later - ip, _, err := client.DropletStatus(dropletId) + droplet, _, err := client.Droplets.Get(dropletId) if err != nil { - err := fmt.Errorf("Error retrieving droplet ID: %s", err) + err := fmt.Errorf("Error retrieving droplet: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - state.Put("droplet_ip", ip) + // Verify we have an IPv4 address + invalid := droplet.Networks == nil || + len(droplet.Networks.V4) == 0 + if invalid { + err := fmt.Errorf("IPv4 address not found for droplet!") + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + state.Put("droplet_ip", droplet.Networks.V4[0].IPAddress) return multistep.ActionContinue } diff --git a/builder/digitalocean/step_power_off.go b/builder/digitalocean/step_power_off.go index d6ef49a22..3d547e8c2 100644 --- a/builder/digitalocean/step_power_off.go +++ b/builder/digitalocean/step_power_off.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -11,12 +12,12 @@ import ( type stepPowerOff struct{} func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(DigitalOceanClient) + client := state.Get("client").(*godo.Client) c := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) - dropletId := state.Get("droplet_id").(uint) + dropletId := state.Get("droplet_id").(int) - _, status, err := client.DropletStatus(dropletId) + droplet, _, err := client.Droplets.Get(dropletId) if err != nil { err := fmt.Errorf("Error checking droplet state: %s", err) state.Put("error", err) @@ -24,14 +25,14 @@ func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - if status == "off" { + if droplet.Status == "off" { // Droplet is already off, don't do anything return multistep.ActionContinue } // Pull the plug on the Droplet ui.Say("Forcefully shutting down Droplet...") - err = client.PowerOffDroplet(dropletId) + _, _, err = client.DropletActions.PowerOff(dropletId) if err != nil { err := fmt.Errorf("Error powering off droplet: %s", err) state.Put("error", err) diff --git a/builder/digitalocean/step_shutdown.go b/builder/digitalocean/step_shutdown.go index 06a2ae9f5..602f3e690 100644 --- a/builder/digitalocean/step_shutdown.go +++ b/builder/digitalocean/step_shutdown.go @@ -5,6 +5,7 @@ import ( "log" "time" + "github.com/digitalocean/godo" "github.com/mitchellh/multistep" 
"github.com/mitchellh/packer/packer" ) @@ -12,16 +13,16 @@ import ( type stepShutdown struct{} func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(DigitalOceanClient) + client := state.Get("client").(*godo.Client) ui := state.Get("ui").(packer.Ui) - dropletId := state.Get("droplet_id").(uint) + dropletId := state.Get("droplet_id").(int) // Gracefully power off the droplet. We have to retry this a number // of times because sometimes it says it completed when it actually // did absolutely nothing (*ALAKAZAM!* magic!). We give up after // a pretty arbitrary amount of time. ui.Say("Gracefully shutting down droplet...") - err := client.ShutdownDroplet(dropletId) + _, _, err := client.DropletActions.Shutdown(dropletId) if err != nil { // If we get an error the first time, actually report it err := fmt.Errorf("Error shutting down droplet: %s", err) @@ -48,7 +49,7 @@ func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { for attempts := 2; attempts > 0; attempts++ { log.Printf("ShutdownDroplet attempt #%d...", attempts) - err := client.ShutdownDroplet(dropletId) + _, _, err := client.DropletActions.Shutdown(dropletId) if err != nil { log.Printf("Shutdown retry error: %s", err) } diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go index 1903c1a34..7ff384924 100644 --- a/builder/digitalocean/step_snapshot.go +++ b/builder/digitalocean/step_snapshot.go @@ -5,6 +5,7 @@ import ( "fmt" "log" + "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -12,13 +13,13 @@ import ( type stepSnapshot struct{} func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { - client := state.Get("client").(DigitalOceanClient) + client := state.Get("client").(*godo.Client) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) - dropletId := state.Get("droplet_id").(uint) + dropletId := state.Get("droplet_id").(int) ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName)) - err := client.CreateSnapshot(dropletId, c.SnapshotName) + _, _, err := client.DropletActions.Snapshot(dropletId, c.SnapshotName) if err != nil { err := fmt.Errorf("Error creating snapshot: %s", err) state.Put("error", err) @@ -36,7 +37,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } log.Printf("Looking up snapshot ID for snapshot: %s", c.SnapshotName) - images, err := client.Images() + images, _, err := client.Images.List(nil) if err != nil { err := fmt.Errorf("Error looking up snapshot ID: %s", err) state.Put("error", err) @@ -44,10 +45,10 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - var imageId uint + var imageId int for _, image := range images { if image.Name == c.SnapshotName { - imageId = image.Id + imageId = image.ID break } } @@ -60,7 +61,6 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } log.Printf("Snapshot image ID: %d", imageId) - state.Put("snapshot_image_id", imageId) state.Put("snapshot_name", c.SnapshotName) state.Put("region", c.Region) diff --git a/builder/digitalocean/token_source.go b/builder/digitalocean/token_source.go new file mode 100644 index 000000000..eab5a084b --- /dev/null +++ b/builder/digitalocean/token_source.go @@ -0,0 +1,15 @@ +package digitalocean + +import ( + "golang.org/x/oauth2" +) + +type apiTokenSource struct { + AccessToken string +} + +func (t *apiTokenSource) Token() 
(*oauth2.Token, error) { + return &oauth2.Token{ + AccessToken: t.AccessToken, + }, nil +} diff --git a/builder/digitalocean/wait.go b/builder/digitalocean/wait.go index e5b1dee90..3d299d433 100644 --- a/builder/digitalocean/wait.go +++ b/builder/digitalocean/wait.go @@ -4,11 +4,15 @@ import ( "fmt" "log" "time" + + "github.com/digitalocean/godo" ) // waitForState simply blocks until the droplet is in // a state we expect, while eventually timing out. -func waitForDropletState(desiredState string, dropletId uint, client DigitalOceanClient, timeout time.Duration) error { +func waitForDropletState( + desiredState string, dropletId int, + client *godo.Client, timeout time.Duration) error { done := make(chan struct{}) defer close(done) @@ -19,13 +23,13 @@ func waitForDropletState(desiredState string, dropletId uint, client DigitalOcea attempts += 1 log.Printf("Checking droplet status... (attempt: %d)", attempts) - _, status, err := client.DropletStatus(dropletId) + droplet, _, err := client.Droplets.Get(dropletId) if err != nil { result <- err return } - if status == desiredState { + if droplet.Status == desiredState { result <- nil return } From e294db8ede0e9813c77192478159bc124412b811 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 10 Jun 2015 14:04:24 -0700 Subject: [PATCH 266/956] Revert to original BuilderId --- post-processor/compress/artifact.go | 2 +- post-processor/compress/post-processor.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index f428a3b55..054d501d1 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -5,7 +5,7 @@ import ( "os" ) -const BuilderId = "vtolstov.compress" +const BuilderId = "packer.post-processor.compress" type Artifact struct { builderId string diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 74228d909..6d28e7c0e 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -1,10 +1,10 @@ package compress import ( - tar "archive/tar" - zip "archive/zip" + "archive/tar" + "archive/zip" "compress/flate" - gzip "compress/gzip" + "compress/gzip" "fmt" "io" "os" @@ -13,13 +13,13 @@ import ( "strings" "time" - bgzf "github.com/biogo/hts/bgzf" - pgzip "github.com/klauspost/pgzip" + "github.com/biogo/hts/bgzf" + "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" - lz4 "github.com/pierrec/lz4" + "github.com/pierrec/lz4" "gopkg.in/yaml.v2" ) From 3ac74bbae8cc2b1b3bb3bcd65454b2e29391418d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 10 Jun 2015 14:07:13 -0700 Subject: [PATCH 267/956] Remove redundant aliases --- post-processor/compress/benchmark.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go index a2585bc89..ed4d68168 100644 --- a/post-processor/compress/benchmark.go +++ b/post-processor/compress/benchmark.go @@ -4,17 +4,17 @@ package main import ( "compress/flate" - gzip "compress/gzip" + "compress/gzip" + "fmt" "io" "io/ioutil" - "fmt" "os" "runtime" "testing" - bgzf "github.com/biogo/hts/bgzf" - pgzip "github.com/klauspost/pgzip" - lz4 "github.com/pierrec/lz4" + "github.com/biogo/hts/bgzf" + "github.com/klauspost/pgzip" + 
"github.com/pierrec/lz4" ) type Compressor struct { @@ -60,7 +60,7 @@ func NewCompressor(src, dst string) (*Compressor, error) { func main() { - runtime.GOMAXPROCS(runtime.NumCPU()) + runtime.GOMAXPROCS(runtime.NumCPU()) var resw testing.BenchmarkResult var resr testing.BenchmarkResult @@ -174,7 +174,7 @@ func (c *Compressor) BenchmarkPGZIPReader(b *testing.B) { func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { cw := lz4.NewWriter(c.w) -// cw.Header.HighCompression = true + // cw.Header.HighCompression = true cw.Header.NoChecksum = true b.ResetTimer() From 486c7e4ae61237a82fb0ec1be57d9adde197deb8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 14:07:24 -0700 Subject: [PATCH 268/956] builder/digitalocean: remove unused things --- builder/digitalocean/config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 0be49f08e..5defe89db 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -12,7 +12,6 @@ import ( "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" - //"github.com/digitalocean/godo" ) type Config struct { @@ -20,8 +19,6 @@ type Config struct { APIToken string `mapstructure:"api_token"` - // OLD STUFF - Region string `mapstructure:"region"` Size string `mapstructure:"size"` Image string `mapstructure:"image"` From a691a1521c5f722d7e3449fd84fcf40fa3183b6a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 14:16:40 -0700 Subject: [PATCH 269/956] website: update do docs --- .../docs/builders/digitalocean.html.markdown | 51 ++----------------- 1 file changed, 4 insertions(+), 47 deletions(-) diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index 5ffe1c668..28254b19c 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -24,31 +24,13 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. -### Required v1 api: +### Required: -* `api_key` (string) - The API key to use to access your account. You can - retrieve this on the "API" page visible after logging into your account - on DigitalOcean. - If not specified, Packer will use the environment variable - `DIGITALOCEAN_API_KEY`, if set. - -* `client_id` (string) - The client ID to use to access your account. You can - find this on the "API" page visible after logging into your account on - DigitalOcean. - If not specified, Packer will use the environment variable - `DIGITALOCEAN_CLIENT_ID`, if set. - -### Required v2 api: - -* `api_token` (string) - The client TOKEN to use to access your account. If it - specified, then use v2 api (current), if not then used old (v1) deprecated api. - Also it can be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. +* `api_token` (string) - The client TOKEN to use to access your account. + It can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. ### Optional: -* `api_url` (string) - API endpoint, by default use https://api.digitalocean.com - Also it can be specified via environment variable `DIGITALOCEAN_API_URL`, if set. - * `droplet_name` (string) - The name assigned to the droplet. 
DigitalOcean sets the hostname of the machine to this value. @@ -57,10 +39,6 @@ each category, the available configuration keys are alphabetized. defaults to 'ubuntu-12-04-x64' which is the slug for "Ubuntu 12.04.4 x64". See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the the accepted image names/slugs. -* `image_id` (integer) - The ID of the base image to use. This is the image that - will be used to launch a new droplet and provision it. - This setting is deprecated. Use `image` instead. - * `private_networking` (boolean) - Set to `true` to enable private networking for the droplet being created. This defaults to `false`, or not enabled. @@ -69,17 +47,10 @@ each category, the available configuration keys are alphabetized. This defaults to "nyc3", which is the slug for "New York 3". See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs. -* `region_id` (integer) - The ID of the region to launch the droplet in. Consequently, - this is the region where the snapshot will be available. - This setting is deprecated. Use `region` instead. - * `size` (string) - The name (or slug) of the droplet size to use. This defaults to "512mb", which is the slug for "512MB". See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs. -* `size_id` (integer) - The ID of the droplet size to use. - This setting is deprecated. Use `size` instead. - * `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. This must be unique. To help make this unique, use a function like `timestamp` (see @@ -107,20 +78,6 @@ own access tokens: ```javascript { "type": "digitalocean", - "client_id": "YOUR CLIENT ID", - "api_key": "YOUR API KEY" + "api_token": "YOUR API KEY" } ``` - -## Finding Image, Region, and Size IDs - -Unfortunately, finding a list of available values for `image_id`, `region_id`, -and `size_id` is not easy at the moment. Basically, it has to be done through -the [DigitalOcean API](https://www.digitalocean.com/api_access) using the -`/images`, `/regions`, and `/sizes` endpoints. You can use `curl` for this -or request it in your browser. - -If you're comfortable installing RubyGems, [Tugboat](https://github.com/pearkes/tugboat) -is a fantastic DigitalOcean command-line client that has commands to -find the available images, regions, and sizes. For example, to see all the -global images, you can run `tugboat images --global`. 
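The DigitalOcean commits above all converge on a single client-construction pattern: the official `godo` library driven by a static `oauth2.TokenSource`. For orientation, here is a minimal, self-contained sketch of that pattern outside of Packer, assuming only that `DIGITALOCEAN_API_TOKEN` is set — the `main` wiring and the `Regions.List` call are illustrative additions, not part of these patches:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

// apiTokenSource adapts a static personal access token to the
// oauth2.TokenSource interface, mirroring the token_source.go file
// added earlier in this series.
type apiTokenSource struct {
	AccessToken string
}

func (t *apiTokenSource) Token() (*oauth2.Token, error) {
	return &oauth2.Token{AccessToken: t.AccessToken}, nil
}

func main() {
	// The same construction the builder's Run method now uses.
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, &apiTokenSource{
		AccessToken: os.Getenv("DIGITALOCEAN_API_TOKEN"),
	}))

	// Illustrative check that the token works: print the region slugs
	// the docs above refer to.
	regions, _, err := client.Regions.List(nil)
	if err != nil {
		log.Fatalf("listing regions: %s", err)
	}
	for _, r := range regions {
		fmt.Println(r.Slug)
	}
}
```

The indirection through `apiTokenSource` exists because `godo` accepts any `*http.Client`; `oauth2.NewClient` returns one whose transport injects the bearer token on every request, so no request-signing code is needed in the builder itself.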
From 9a393a560172e286cf9b9319a6689822b47c2783 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 14:18:38 -0700 Subject: [PATCH 270/956] builder/digitalocean: only list user images --- builder/digitalocean/step_snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go index 7ff384924..cfda7af20 100644 --- a/builder/digitalocean/step_snapshot.go +++ b/builder/digitalocean/step_snapshot.go @@ -37,7 +37,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } log.Printf("Looking up snapshot ID for snapshot: %s", c.SnapshotName) - images, _, err := client.Images.List(nil) + images, _, err := client.Images.ListUser(&godo.ListOptions{PerPage: 200}) if err != nil { err := fmt.Errorf("Error looking up snapshot ID: %s", err) state.Put("error", err) From 2056fda4d3a91e670f628f554b0fcb0e3725bddd Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 10 Jun 2015 16:19:36 -0500 Subject: [PATCH 271/956] builder/amazon: Allow spaces in AMI names --- builder/amazon/common/template_funcs.go | 2 +- builder/amazon/common/template_funcs_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/template_funcs.go b/builder/amazon/common/template_funcs.go index 45f68af92..30d49fdb4 100644 --- a/builder/amazon/common/template_funcs.go +++ b/builder/amazon/common/template_funcs.go @@ -20,7 +20,7 @@ func isalphanumeric(b byte) bool { // Clean up AMI name by replacing invalid characters with "-" func templateCleanAMIName(s string) string { - allowed := []byte{'(', ')', ',', '/', '-', '_'} + allowed := []byte{'(', ')', ',', '/', '-', '_', ' '} b := []byte(s) newb := make([]byte, len(b)) for i, c := range b { diff --git a/builder/amazon/common/template_funcs_test.go b/builder/amazon/common/template_funcs_test.go index 0e8c568ef..e4126bf61 100644 --- a/builder/amazon/common/template_funcs_test.go +++ b/builder/amazon/common/template_funcs_test.go @@ -5,8 +5,8 @@ import ( ) func TestAMITemplatePrepare_clean(t *testing.T) { - origName := "AMZamz09(),/-_:&^$%" - expected := "AMZamz09(),/-_-----" + origName := "AMZamz09(),/-_:&^ $%" + expected := "AMZamz09(),/-_--- --" name := templateCleanAMIName(origName) From 311c9eb5c2b594312ba6e69ff47b71419657349c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 14:28:05 -0700 Subject: [PATCH 272/956] builder/digitalocean: fix unit tests --- builder/digitalocean/builder_test.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/builder/digitalocean/builder_test.go b/builder/digitalocean/builder_test.go index 8985aae2f..878431691 100644 --- a/builder/digitalocean/builder_test.go +++ b/builder/digitalocean/builder_test.go @@ -1,22 +1,15 @@ package digitalocean import ( - "github.com/mitchellh/packer/packer" - "os" "strconv" "testing" -) -func init() { - // Clear out the credential env vars - os.Setenv("DIGITALOCEAN_API_KEY", "") - os.Setenv("DIGITALOCEAN_CLIENT_ID", "") -} + "github.com/mitchellh/packer/packer" +) func testConfig() map[string]interface{} { return map[string]interface{}{ - "client_id": "foo", - "api_key": "bar", + "api_token": "bar", } } @@ -78,7 +71,6 @@ func TestBuilderPrepare_Region(t *testing.T) { expected := "sfo1" // Test set - config["region_id"] = 0 config["region"] = expected b = Builder{} warnings, err = b.Prepare(config) @@ -114,7 +106,6 @@ func TestBuilderPrepare_Size(t *testing.T) { expected := "1024mb" // Test set 
- config["size_id"] = 0 config["size"] = expected b = Builder{} warnings, err = b.Prepare(config) @@ -150,7 +141,6 @@ func TestBuilderPrepare_Image(t *testing.T) { expected := "ubuntu-14-04-x64" // Test set - config["image_id"] = 0 config["image"] = expected b = Builder{} warnings, err = b.Prepare(config) From be8443d50813919a3d9fc0e39135d0d570a6c968 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 18:49:27 -0700 Subject: [PATCH 273/956] update CHANGELOG --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a33dbc92..cc2ea5217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## 0.8.0 (unreleased) +BACKWARDS INCOMPATIBILITIES: + + * The DigitalOcean builder no longer supports the v1 API which has been + deprecated for some time. Most configurations should continue to + work as long as you use the `api_token` field for auth. + FEATURES: * **New config function: `template_dir`**: The directory to the template From 5da56d2aa6622b5096cdb1c5efbeba0408c63aa0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 18:54:51 -0700 Subject: [PATCH 274/956] builder/digitalocean: image, region, etc. required --- builder/digitalocean/builder.go | 12 ----------- builder/digitalocean/builder_acc_test.go | 5 ++++- builder/digitalocean/builder_test.go | 24 ++++++--------------- builder/digitalocean/config.go | 27 +++++++++++++----------- 4 files changed, 25 insertions(+), 43 deletions(-) diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index 3ba7074a2..996eb2e09 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -14,18 +14,6 @@ import ( "golang.org/x/oauth2" ) -// see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key] -// name="Ubuntu 12.04.4 x64", id=6374128, -const DefaultImage = "ubuntu-12-04-x64" - -// see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key] -// name="New York 3", id=8 -const DefaultRegion = "nyc3" - -// see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key] -// name="512MB", id=66 (the smallest droplet size) -const DefaultSize = "512mb" - // The unique id for the builder const BuilderId = "pearkes.digitalocean" diff --git a/builder/digitalocean/builder_acc_test.go b/builder/digitalocean/builder_acc_test.go index 20e56b924..f9df863cb 100644 --- a/builder/digitalocean/builder_acc_test.go +++ b/builder/digitalocean/builder_acc_test.go @@ -24,7 +24,10 @@ func testAccPreCheck(t *testing.T) { const testBuilderAccBasic = ` { "builders": [{ - "type": "test" + "type": "test", + "region": "nyc2", + "size": "512mb", + "image": "ubuntu-12-04-x64" }] } ` diff --git a/builder/digitalocean/builder_test.go b/builder/digitalocean/builder_test.go index 878431691..3d2378f2d 100644 --- a/builder/digitalocean/builder_test.go +++ b/builder/digitalocean/builder_test.go @@ -60,12 +60,8 @@ func TestBuilderPrepare_Region(t *testing.T) { if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.Region != DefaultRegion { - t.Errorf("found %s, expected %s", b.config.Region, DefaultRegion) + if err == nil { + t.Fatalf("should error") } expected := "sfo1" @@ -95,12 +91,8 @@ func TestBuilderPrepare_Size(t *testing.T) { if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.Size != DefaultSize { - 
t.Errorf("found %s, expected %s", b.config.Size, DefaultSize) + if err == nil { + t.Fatalf("should error") } expected := "1024mb" @@ -130,12 +122,8 @@ func TestBuilderPrepare_Image(t *testing.T) { if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.Image != DefaultImage { - t.Errorf("found %s, expected %s", b.config.Image, DefaultImage) + if err == nil { + t.Fatal("should error") } expected := "ubuntu-14-04-x64" diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 5defe89db..057138633 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -63,18 +63,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN") } - if c.Region == "" { - c.Region = DefaultRegion - } - - if c.Size == "" { - c.Size = DefaultSize - } - - if c.Image == "" { - c.Image = DefaultImage - } - if c.SnapshotName == "" { // Default to packer-{{ unix timestamp (utc) }} c.SnapshotName = "packer-{{timestamp}}" @@ -114,6 +102,21 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs, errors.New("api_token for auth must be specified")) } + if c.Region == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("region is required")) + } + + if c.Size == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("size is required")) + } + + if c.Image == "" { + errs = packer.MultiErrorAppend( + errs, errors.New("image is required")) + } + sshTimeout, err := time.ParseDuration(c.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( From 910b16104ed5ed5d654c56f9e0749873fedfdd84 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 18:56:03 -0700 Subject: [PATCH 275/956] website: update required options for DO --- CHANGELOG.md | 1 + .../docs/builders/digitalocean.html.markdown | 30 +++++++++---------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc2ea5217..fd57fb8c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ BACKWARDS INCOMPATIBILITIES: * The DigitalOcean builder no longer supports the v1 API which has been deprecated for some time. Most configurations should continue to work as long as you use the `api_token` field for auth. + * builder/digitalocean: `image`, `region`, and `size` are now required. FEATURES: diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index 28254b19c..34d11bedb 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -29,28 +29,25 @@ each category, the available configuration keys are alphabetized. * `api_token` (string) - The client TOKEN to use to access your account. It can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. +* `image` (string) - The name (or slug) of the base image to use. This is the + image that will be used to launch a new droplet and provision it. + See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the the accepted image names/slugs. + +* `region` (string) - The name (or slug) of the region to launch the droplet in. + Consequently, this is the region where the snapshot will be available. + See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs. 
+ +* `size` (string) - The name (or slug) of the droplet size to use. + See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs. + ### Optional: * `droplet_name` (string) - The name assigned to the droplet. DigitalOcean sets the hostname of the machine to this value. -* `image` (string) - The name (or slug) of the base image to use. This is the - image that will be used to launch a new droplet and provision it. This - defaults to 'ubuntu-12-04-x64' which is the slug for "Ubuntu 12.04.4 x64". - See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the the accepted image names/slugs. - * `private_networking` (boolean) - Set to `true` to enable private networking for the droplet being created. This defaults to `false`, or not enabled. -* `region` (string) - The name (or slug) of the region to launch the droplet in. - Consequently, this is the region where the snapshot will be available. - This defaults to "nyc3", which is the slug for "New York 3". - See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs. - -* `size` (string) - The name (or slug) of the droplet size to use. - This defaults to "512mb", which is the slug for "512MB". - See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs. - * `snapshot_name` (string) - The name of the resulting snapshot that will appear in your account. This must be unique. To help make this unique, use a function like `timestamp` (see @@ -78,6 +75,9 @@ own access tokens: ```javascript { "type": "digitalocean", - "api_token": "YOUR API KEY" + "api_token": "YOUR API KEY", + "image": "ubuntu-12-04-x64", + "region": "nyc2", + "size": "512mb" } ``` From 6b29c2d26a80289c720300cc0381613148c938ad Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 18:58:27 -0700 Subject: [PATCH 276/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd57fb8c3..357afa298 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] + * builder/digitalocean: Save SSH key to pwd if debug mode is on. 
[GH-1829] * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear From c9c9e2871c2cfc6b2e3b17c5c24eb58cc5a6f45c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 18:58:50 -0700 Subject: [PATCH 277/956] builder/digitalocean: fix build --- builder/digitalocean/builder.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index c269da6e1..97569d0fe 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -4,6 +4,7 @@ package digitalocean import ( + "fmt" "log" "time" @@ -47,7 +48,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ &stepCreateSSHKey{ - Debug: b.config.PackerDebug, + Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("do_%s.pem", b.config.PackerBuildName), }, new(stepCreateDroplet), From 5cfd26a0d336494f2273076b6f1cf2341bcec551 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 19:29:48 -0700 Subject: [PATCH 278/956] builder/digitalocean: user data support [GH-2113] --- builder/digitalocean/config.go | 1 + builder/digitalocean/step_create_droplet.go | 1 + website/source/docs/builders/digitalocean.html.markdown | 2 ++ 3 files changed, 4 insertions(+) diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 057138633..5621d64d7 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -26,6 +26,7 @@ type Config struct { PrivateNetworking bool `mapstructure:"private_networking"` SnapshotName string `mapstructure:"snapshot_name"` DropletName string `mapstructure:"droplet_name"` + UserData string `mapstructure:"user_data"` SSHUsername string `mapstructure:"ssh_username"` SSHPort uint `mapstructure:"ssh_port"` diff --git a/builder/digitalocean/step_create_droplet.go b/builder/digitalocean/step_create_droplet.go index 40ac8f0e9..aafd53622 100644 --- a/builder/digitalocean/step_create_droplet.go +++ b/builder/digitalocean/step_create_droplet.go @@ -31,6 +31,7 @@ func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction { godo.DropletCreateSSHKey{ID: int(sshKeyId)}, }, PrivateNetworking: c.PrivateNetworking, + UserData: c.UserData, }) if err != nil { err := fmt.Errorf("Error creating droplet: %s", err) diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index 34d11bedb..829424e3d 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -67,6 +67,8 @@ each category, the available configuration keys are alphabetized. for a droplet to enter a desired state (such as "active") before timing out. The default state timeout is "6m". +* `user_data` (string) - User data to launch with the Droplet. + ## Basic Example Here is a basic example. 
It is completely valid as soon as you enter your From 0e0cd28071942199858f2e9384368473c3c8d54a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 19:31:48 -0700 Subject: [PATCH 279/956] builder/digitalocean: fix failing unit tests --- builder/digitalocean/builder_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builder/digitalocean/builder_test.go b/builder/digitalocean/builder_test.go index 3d2378f2d..22c9e3b50 100644 --- a/builder/digitalocean/builder_test.go +++ b/builder/digitalocean/builder_test.go @@ -10,6 +10,9 @@ import ( func testConfig() map[string]interface{} { return map[string]interface{}{ "api_token": "bar", + "region": "nyc2", + "size": "512mb", + "image": "foo", } } @@ -56,6 +59,7 @@ func TestBuilderPrepare_Region(t *testing.T) { config := testConfig() // Test default + delete(config, "region") warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) @@ -87,6 +91,7 @@ func TestBuilderPrepare_Size(t *testing.T) { config := testConfig() // Test default + delete(config, "size") warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) @@ -118,6 +123,7 @@ func TestBuilderPrepare_Image(t *testing.T) { config := testConfig() // Test default + delete(config, "image") warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) From dcf140f99facb21ef1e177866c9cb41eb4349093 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 19:53:07 -0700 Subject: [PATCH 280/956] builder/digitalocean: more robust wait for pending --- builder/digitalocean/config.go | 7 +++- builder/digitalocean/step_power_off.go | 10 ++++++ builder/digitalocean/step_shutdown.go | 14 +++++++- builder/digitalocean/step_snapshot.go | 13 +++++++ builder/digitalocean/wait.go | 49 ++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 2 deletions(-) diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 057138633..dd1460583 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -64,8 +64,13 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } if c.SnapshotName == "" { + def, err := interpolate.Render("packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + // Default to packer-{{ unix timestamp (utc) }} - c.SnapshotName = "packer-{{timestamp}}" + c.SnapshotName = def } if c.DropletName == "" { diff --git a/builder/digitalocean/step_power_off.go b/builder/digitalocean/step_power_off.go index 3d547e8c2..94891e227 100644 --- a/builder/digitalocean/step_power_off.go +++ b/builder/digitalocean/step_power_off.go @@ -3,6 +3,7 @@ package digitalocean import ( "fmt" "log" + "time" "github.com/digitalocean/godo" "github.com/mitchellh/multistep" @@ -48,6 +49,15 @@ func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } + // Wait for the droplet to become unlocked for future steps + if err := waitForDropletUnlocked(client, dropletId, 2*time.Minute); err != nil { + // If we get an error the first time, actually report it + err := fmt.Errorf("Error powering off droplet: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + return multistep.ActionContinue } diff --git a/builder/digitalocean/step_shutdown.go b/builder/digitalocean/step_shutdown.go index 602f3e690..da04aee33 100644 --- a/builder/digitalocean/step_shutdown.go +++ b/builder/digitalocean/step_shutdown.go @@ -65,7 +65,19 @@ func (s *stepShutdown) 
Run(state multistep.StateBag) multistep.StepAction { err = waitForDropletState("off", dropletId, client, 2*time.Minute) if err != nil { - log.Printf("Error waiting for graceful off: %s", err) + // If we get an error the first time, actually report it + err := fmt.Errorf("Error shutting down droplet: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if err := waitForDropletUnlocked(client, dropletId, 2*time.Minute); err != nil { + // If we get an error the first time, actually report it + err := fmt.Errorf("Error shutting down droplet: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt } return multistep.ActionContinue diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go index cfda7af20..f6902b8c5 100644 --- a/builder/digitalocean/step_snapshot.go +++ b/builder/digitalocean/step_snapshot.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "log" + "time" "github.com/digitalocean/godo" "github.com/mitchellh/multistep" @@ -27,6 +28,18 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } + // Wait for the droplet to become unlocked first. For snapshots + // this can end up taking quite a long time, so we hardcode this to + // 10 minutes. + if err := waitForDropletUnlocked(client, dropletId, 10*time.Minute); err != nil { + // If we get an error the first time, actually report it + err := fmt.Errorf("Error shutting down droplet: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // With the pending state over, verify that we're in the active state ui.Say("Waiting for snapshot to complete...") err = waitForDropletState("active", dropletId, client, c.stateTimeout) if err != nil { diff --git a/builder/digitalocean/wait.go b/builder/digitalocean/wait.go index 3d299d433..a41bbb3ed 100644 --- a/builder/digitalocean/wait.go +++ b/builder/digitalocean/wait.go @@ -8,6 +8,55 @@ import ( "github.com/digitalocean/godo" ) +// waitForDropletUnlocked waits for the Droplet to be unlocked to +// avoid "pending" errors when making state changes. +func waitForDropletUnlocked( + client *godo.Client, dropletId int, timeout time.Duration) error { + done := make(chan struct{}) + defer close(done) + + result := make(chan error, 1) + go func() { + attempts := 0 + for { + attempts += 1 + + log.Printf("[DEBUG] Checking droplet lock state... (attempt: %d)", attempts) + droplet, _, err := client.Droplets.Get(dropletId) + if err != nil { + result <- err + return + } + + if !droplet.Locked { + result <- nil + return + } + + // Wait 3 seconds in between + time.Sleep(3 * time.Second) + + // Verify we shouldn't exit + select { + case <-done: + // We finished, so just exit the goroutine + return + default: + // Keep going + } + } + }() + + log.Printf("[DEBUG] Waiting for up to %d seconds for droplet to unlock", timeout/time.Second) + select { + case err := <-result: + return err + case <-time.After(timeout): + return fmt.Errorf( + "Timeout while waiting to for droplet to unlock") + } +} + // waitForState simply blocks until the droplet is in // a state we expect, while eventually timing out. 
func waitForDropletState( From c9d308c5d7f4a26c620fc5be10df045c018b7505 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 19:53:52 -0700 Subject: [PATCH 281/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 357afa298..b79d6a4bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,8 @@ BUG FIXES: * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/amazon/instance: Use `--region` flag for bundle upload command. [GH-1931] + * builder/digitalocean: Wait for droplet to unlock before changing state, + should lower the "pending event" errors. * builder/digitalocean: Ignore invalid fields from the ever-changing v2 API * builder/digitalocean: Private images can be used as a source [GH-1792] * builder/docker: Fixed hang on prompt while copying script From 03032c26fbef7d2a4ba79fc06d437b10f0a4eb18 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 20:23:39 -0700 Subject: [PATCH 282/956] ignore non-exe plugins on Windows [GH-2173] --- config.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/config.go b/config.go index 745922b1e..20e8929c3 100644 --- a/config.go +++ b/config.go @@ -6,6 +6,7 @@ import ( "log" "os/exec" "path/filepath" + "runtime" "strings" "github.com/mitchellh/osext" @@ -172,6 +173,15 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error { for _, match := range matches { file := filepath.Base(match) + // One Windows, ignore any plugins that don't end in .exe. + // We could do a full PATHEXT parse, but this is probably good enough. + if runtime.GOOS == "windows" && filepath.Ext(file) != "exe" { + log.Printf( + "[DEBUG] Ignoring plugin match %s, no exe extension", + match) + continue + } + // If the filename has a ".", trim up to there if idx := strings.Index(file, "."); idx >= 0 { file = file[:idx] From 9514be0df0613f4c7c6dd8e0b25f62f026ace9d3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 20:24:34 -0700 Subject: [PATCH 283/956] lowercase the extension of plugins just in case --- config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.go b/config.go index 20e8929c3..2ebc66422 100644 --- a/config.go +++ b/config.go @@ -175,7 +175,7 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error { // One Windows, ignore any plugins that don't end in .exe. // We could do a full PATHEXT parse, but this is probably good enough. 
From 9c1e6bc478c150920a2a6433fddbf6808b0f1a12 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Wed, 10 Jun 2015 20:30:00 -0700
Subject: [PATCH 284/956] website: clarify shutdown command [GH-2011]

---
 website/source/docs/builders/virtualbox-iso.html.markdown | 8 +++++---
 website/source/docs/builders/virtualbox-ovf.html.markdown | 8 +++++---
 website/source/docs/builders/vmware-vmx.html.markdown     | 8 +++++---
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown
index 03a3fb23a..07204a33c 100644
--- a/website/source/docs/builders/virtualbox-iso.html.markdown
+++ b/website/source/docs/builders/virtualbox-iso.html.markdown
@@ -179,9 +179,11 @@ each category, the available options are alphabetized and described.
   By default this is "output-BUILDNAME" where "BUILDNAME" is the name
   of the build.
 
-* `shutdown_command` (string) - The command to use to gracefully shut down
-  the machine once all the provisioning is done. By default this is an empty
-  string, which tells Packer to just forcefully shut down the machine.
+* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all
+  the provisioning is done. By default this is an empty string, which tells Packer to just
+  forcefully shut down the machine. If a shutdown command already runs inside one of your
+  scripts, it may safely be omitted here. If one or more scripts require a reboot, it is
+  suggested to leave this blank (since reboots may fail) and to specify the final shutdown
+  command in your last script.
 
 * `shutdown_timeout` (string) - The amount of time to wait after executing
   the `shutdown_command` for the virtual machine to actually shut down.
diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown
index 88a92b674..9635d0e60 100644
--- a/website/source/docs/builders/virtualbox-ovf.html.markdown
+++ b/website/source/docs/builders/virtualbox-ovf.html.markdown
@@ -154,9 +154,11 @@ each category, the available options are alphabetized and described.
   By default this is "output-BUILDNAME" where "BUILDNAME" is the name
   of the build.
 
-* `shutdown_command` (string) - The command to use to gracefully shut down
-  the machine once all the provisioning is done. By default this is an empty
-  string, which tells Packer to just forcefully shut down the machine.
+* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all
+  the provisioning is done. By default this is an empty string, which tells Packer to just
+  forcefully shut down the machine. If a shutdown command already runs inside one of your
+  scripts, it may safely be omitted here. If one or more scripts require a reboot, it is
+  suggested to leave this blank (since reboots may fail) and to specify the final shutdown
+  command in your last script.
 
 * `shutdown_timeout` (string) - The amount of time to wait after executing
   the `shutdown_command` for the virtual machine to actually shut down.
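For intuition, the behavior these two options drive is roughly the following (a simplified sketch; runCommand and vmRunning are hypothetical stand-ins for the builder's communicator and driver, not the builders' actual code):

package example

import (
	"fmt"
	"time"
)

// runShutdown sketches how a builder honors shutdown_command and
// shutdown_timeout: issue the command, then poll until the VM powers
// off or the timeout elapses.
func runShutdown(runCommand func(string) error, vmRunning func() bool,
	shutdownCommand string, shutdownTimeout time.Duration) error {
	if shutdownCommand == "" {
		// An empty command means the builder forcefully powers off the VM.
		return nil
	}

	if err := runCommand(shutdownCommand); err != nil {
		return err
	}

	deadline := time.Now().Add(shutdownTimeout)
	for vmRunning() {
		if time.Now().After(deadline) {
			return fmt.Errorf("Timeout while waiting for machine to shut down")
		}
		time.Sleep(2 * time.Second)
	}
	return nil
}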
diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown
index 399bab8a9..bbdd8925c 100644
--- a/website/source/docs/builders/vmware-vmx.html.markdown
+++ b/website/source/docs/builders/vmware-vmx.html.markdown
@@ -109,9 +109,11 @@ each category, the available options are alphabetized and described.
   By default this is "output-BUILDNAME" where "BUILDNAME" is the name
   of the build.
 
-* `shutdown_command` (string) - The command to use to gracefully shut down
-  the machine once all the provisioning is done. By default this is an empty
-  string, which tells Packer to just forcefully shut down the machine.
+* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all
+  the provisioning is done. By default this is an empty string, which tells Packer to just
+  forcefully shut down the machine. If a shutdown command already runs inside one of your
+  scripts, it may safely be omitted here. If one or more scripts require a reboot, it is
+  suggested to leave this blank (since reboots may fail) and to specify the final shutdown
+  command in your last script.
 
 * `shutdown_timeout` (string) - The amount of time to wait after executing
   the `shutdown_command` for the virtual machine to actually shut down.

From 9c1e461402bb88c32e08c8019e7f9816e36eb2a0 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Wed, 10 Jun 2015 20:33:00 -0700
Subject: [PATCH 285/956] website: document chef_environment for chef-solo

---
 website/source/docs/provisioners/chef-solo.html.markdown | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown
index 178ba0c62..3a76c5514 100644
--- a/website/source/docs/provisioners/chef-solo.html.markdown
+++ b/website/source/docs/provisioners/chef-solo.html.markdown
@@ -34,6 +34,9 @@ The example below is fully functional and expects cookbooks in the
 The reference of available configuration options is listed below. No
 configuration is actually required, but at least `run_list` is recommended.
 
+* `chef_environment` (string) - The name of the `chef_environment` sent to the
+  Chef server. By default this is empty and no environment will be used.
+
 * `config_template` (string) - Path to a template that will be used for the
   Chef configuration file. By default Packer only sets configuration it needs
   to match the settings set in the provisioner configuration.
If From 952077ccb0b1cfe189a43b8001d6dac3ea29b64c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 21:02:22 -0700 Subject: [PATCH 286/956] provisioner/shell: set -e for inline [GH-2069] --- provisioner/shell/provisioner.go | 1 + 1 file changed, 1 insertion(+) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 48904710d..8b6ceb705 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -184,6 +184,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { // Write our contents to it writer := bufio.NewWriter(tf) writer.WriteString(fmt.Sprintf("#!%s\n", p.config.InlineShebang)) + writer.WriteString("set -e\n") for _, command := range p.config.Inline { if _, err := writer.WriteString(command + "\n"); err != nil { return fmt.Errorf("Error preparing shell script: %s", err) From a995df352e7a6967c7e40d1a165b89af98a4abd3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 10 Jun 2015 21:18:46 -0700 Subject: [PATCH 287/956] provisioner/shell: uploaded script should be 0755 [GH-1708] --- provisioner/shell/provisioner.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 48904710d..f07c91936 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -247,11 +247,11 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } cmd = &packer.RemoteCmd{ - Command: fmt.Sprintf("chmod 0777 %s", p.config.RemotePath), + Command: fmt.Sprintf("chmod 0755 %s", p.config.RemotePath), } if err := comm.Start(cmd); err != nil { return fmt.Errorf( - "Error chmodding script file to 0777 in remote "+ + "Error chmodding script file to 0755 in remote "+ "machine: %s", err) } cmd.Wait() From e8f846e47e93669a9eee2d1daaa4d649a71667ab Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 11 Jun 2015 08:50:48 -0500 Subject: [PATCH 288/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b79d6a4bb..abb0d46d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ IMPROVEMENTS: BUG FIXES: * core: Fix potential panic for post-processor plugin exits [GH-2098] + * builder/amazon: Allow spaces in AMI names when using `clean_ami_name` [GH-2182] * builder/amazon: Remove deprecated ec2-upload-bundle paramger [GH-1931] * builder/amazon: Retry finding created instance for eventual consistency. 
[GH-2129] From 1fbf8b7f32beaf98a1a85f396a0e98eecfd12ca4 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Thu, 11 Jun 2015 10:43:27 -0500 Subject: [PATCH 289/956] update create_tags for new sdk --- builder/amazon/common/step_create_tags.go | 51 ++++++++++++++++------- 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 3a31202ff..7c89e5a59 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -3,6 +3,7 @@ package common import ( "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -19,41 +20,61 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { if len(s.Tags) > 0 { for region, ami := range amis { - ui.Say(fmt.Sprintf("Preparing tags for AMI (%s) and related snapshots", ami)) + ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami)) + + var ec2Tags []*ec2.Tag + for key, value := range s.Tags { + ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", key, value)) + ec2Tags = append(ec2Tags, &ec2.Tag{ + Key: aws.String(key), + Value: aws.String(value), + }) + } // Declare list of resources to tag - resourceIds := []string{ami} + resourceIds := []*string{&ami} // Retrieve image list for given AMI - imageResp, err := ec2conn.Images([]string{ami}, ec2.NewFilter()) + imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + ImageIDs: resourceIds, + }) + if err != nil { err := fmt.Errorf("Error retrieving details for AMI (%s): %s", ami, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - image := &imageResp.Images[0] + + if len(imageResp.Images) == 0 { + err := fmt.Errorf("Error retrieving details for AMI (%s), no images found", ami) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + image := imageResp.Images[0] // Add only those with a Snapshot ID, i.e. 
not Ephemeral - for _, device := range image.BlockDevices { - if device.SnapshotId != "" { - ui.Say(fmt.Sprintf("Tagging snapshot: %s", device.SnapshotId)) - resourceIds = append(resourceIds, device.SnapshotId) + for _, device := range image.BlockDeviceMappings { + if device.EBS != nil && device.EBS.SnapshotID != nil { + ui.Say(fmt.Sprintf("Tagging snapshot: %s", *device.EBS.SnapshotID)) + resourceIds = append(resourceIds, device.EBS.SnapshotID) } } - var ec2Tags []*ec2.Tag - for key, value := range s.Tags { - ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", key, value)) - ec2Tags = append(ec2Tags, &ec2.Tag{Key: &key, Value: &value}) - } + regionconn := ec2.New(&aws.Config{ + Credentials: ec2conn.Config.Credentials, + Region: region, + }) - _, err := regionconn.CreateTags(&ec2.CreateTagsInput{ + _, err = regionconn.CreateTags(&ec2.CreateTagsInput{ Resources: resourceIds, Tags: ec2Tags, }) + if err != nil { - err := fmt.Errorf("Error adding tags to AMI (%s): %s", ami, err) + err := fmt.Errorf("Error adding tags to Resources (%#v): %s", resourceIds, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt From 16f8866728d81acf43925427d2b89db581a55fb1 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 11 Jun 2015 10:57:07 -0500 Subject: [PATCH 290/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index abb0d46d0..4705c3523 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ BUG FIXES: * core: Fix potential panic for post-processor plugin exits [GH-2098] * builder/amazon: Allow spaces in AMI names when using `clean_ami_name` [GH-2182] * builder/amazon: Remove deprecated ec2-upload-bundle paramger [GH-1931] + * builder/amazon: Use IAM Profile to upload bundle if provided [GH-1985] * builder/amazon: Retry finding created instance for eventual consistency. [GH-2129] * builder/amazon: If no AZ is specified, use AZ chosen automatically by From 724d591ba4f29817b4f97085b463ba728936bfe1 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Thu, 11 Jun 2015 14:02:00 -0500 Subject: [PATCH 291/956] documentation: subnet_id is required for non-default VPC --- website/Gemfile.lock | 9 ++++++--- website/source/docs/builders/amazon-ebs.html.markdown | 3 ++- .../source/docs/builders/amazon-instance.html.markdown | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 7366999a6..b49383f77 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -79,18 +79,18 @@ GEM celluloid (~> 0.16.0) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.13) + middleman (3.3.12) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.13) + middleman-core (= 3.3.12) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.13) + middleman-core (3.3.12) activesupport (~> 4.1.0) bundler (~> 1.1) erubis @@ -175,3 +175,6 @@ PLATFORMS DEPENDENCIES middleman-hashicorp! + +BUNDLED WITH + 1.10.2 diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 5e2c31e90..0ff9522df 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -144,7 +144,8 @@ each category, the available configuration keys are alphabetized. or "5m". The default SSH timeout is "5m", or five minutes. 
 * `subnet_id` (string) - If using VPC, the ID of the subnet, such as
-  "subnet-12345def", where Packer will launch the EC2 instance.
+  "subnet-12345def", where Packer will launch the EC2 instance. This field is
+  required if you are using a non-default VPC.
 
 * `tags` (object of key/value strings) - Tags applied to the AMI.
 
diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown
index 8360322de..ae5fbff27 100644
--- a/website/source/docs/builders/amazon-instance.html.markdown
+++ b/website/source/docs/builders/amazon-instance.html.markdown
@@ -184,7 +184,8 @@ each category, the available configuration keys are alphabetized.
   or "5m". The default SSH timeout is "5m", or five minutes.
 
 * `subnet_id` (string) - If using VPC, the ID of the subnet, such as
-  "subnet-12345def", where Packer will launch the EC2 instance.
+  "subnet-12345def", where Packer will launch the EC2 instance. This field is
+  required if you are using a non-default VPC.
 
 * `tags` (object of key/value strings) - Tags applied to the AMI.

From e88fa43cfe9bdb785176c6b9c1944ebecb43157b Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 11 Jun 2015 16:45:24 -0400
Subject: [PATCH 292/956] find proper extension

---
 config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config.go b/config.go
index 2ebc66422..efb4e7d31 100644
--- a/config.go
+++ b/config.go
@@ -175,7 +175,7 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error {
 
 		// On Windows, ignore any plugins that don't end in .exe.
 		// We could do a full PATHEXT parse, but this is probably good enough.
-		if runtime.GOOS == "windows" && strings.ToLower(filepath.Ext(file)) != "exe" {
+		if runtime.GOOS == "windows" && strings.ToLower(filepath.Ext(file)) != ".exe" {
 			log.Printf(
 				"[DEBUG] Ignoring plugin match %s, no exe extension",
 				match)
 			continue

From ae386d0a36c1485276d76ac8a7c30b8fb77c97dd Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 11 Jun 2015 16:55:32 -0400
Subject: [PATCH 293/956] update CHANGELOG

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4705c3523..bef4170f1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -81,6 +81,7 @@ BUG FIXES:
   * post-processor/vagrant-cloud: Don't delete version on error [GH-2014]
   * provisioner/puppet-masterless: Allow manifest_file to be a directory
   * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call
+  * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708]
 
 ## 0.7.5 (December 9, 2014)

From 8ca8bd7866e29bfcf38e84c5393146922e5a404e Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Thu, 11 Jun 2015 16:57:44 -0400
Subject: [PATCH 294/956] update CHANGELOG

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bef4170f1..ded839503 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ IMPROVEMENTS:
 
   * core: Interrupt handling for SIGTERM signal as well. [GH-1858]
   * builder/digitalocean: Save SSH key to pwd if debug mode is on.
[GH-1829] + * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear From 3ed73852be35e04cb69c05459a298e7b1eb4a37c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 11 Jun 2015 17:19:23 -0400 Subject: [PATCH 295/956] provisioner/shell: set -e on the shebang itself --- provisioner/shell/provisioner.go | 3 +-- website/source/docs/provisioners/shell.html.markdown | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 8b6ceb705..0d55c981b 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -94,7 +94,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.InlineShebang == "" { - p.config.InlineShebang = "/bin/sh" + p.config.InlineShebang = "/bin/sh -e" } if p.config.RawStartRetryTimeout == "" { @@ -184,7 +184,6 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { // Write our contents to it writer := bufio.NewWriter(tf) writer.WriteString(fmt.Sprintf("#!%s\n", p.config.InlineShebang)) - writer.WriteString("set -e\n") for _, command := range p.config.Inline { if _, err := writer.WriteString(command + "\n"); err != nil { return fmt.Errorf("Error preparing shell script: %s", err) diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index 7fcbe885b..e57910cb0 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -66,8 +66,10 @@ Optional parameters: * `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when - running commands specified by `inline`. By default, this is `/bin/sh`. + running commands specified by `inline`. By default, this is `/bin/sh -e`. If you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like + the `-e` flag, otherwise individual steps failing won't fail the provisioner. * `remote_path` (string) - The path where the script will be uploaded to in the machine. This defaults to "/tmp/script.sh". 
This value must be From 2a912f7013703e263816513914ec374a9edaba6a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 11 Jun 2015 17:20:13 -0400 Subject: [PATCH 296/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ded839503..207920b3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ BUG FIXES: * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] + * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] ## 0.7.5 (December 9, 2014) From 04e174fae899e80cb1d5d5a9b558e6c23a01a930 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Thu, 11 Jun 2015 16:21:29 -0500 Subject: [PATCH 297/956] builder/amazon: Properly return error code on ssh errors --- common/step_connect_ssh.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/common/step_connect_ssh.go b/common/step_connect_ssh.go index c3d8aac2d..d45767ee5 100644 --- a/common/step_connect_ssh.go +++ b/common/step_connect_ssh.go @@ -3,13 +3,14 @@ package common import ( "errors" "fmt" + "log" + "strings" + "time" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/communicator/ssh" "github.com/mitchellh/packer/packer" gossh "golang.org/x/crypto/ssh" - "log" - "strings" - "time" ) // StepConnectSSH is a multistep Step implementation that waits for SSH @@ -64,6 +65,7 @@ WaitLoop: case <-waitDone: if err != nil { ui.Error(fmt.Sprintf("Error waiting for SSH: %s", err)) + state.Put("error", err) return multistep.ActionHalt } From 7830d78d063f5ade6d6b9b929f8b2c2a1a06999e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 11 Jun 2015 17:24:02 -0400 Subject: [PATCH 298/956] provisioner/shell: fix tests --- provisioner/shell/provisioner_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/shell/provisioner_test.go b/provisioner/shell/provisioner_test.go index 4fd5a5cad..54c41c956 100644 --- a/provisioner/shell/provisioner_test.go +++ b/provisioner/shell/provisioner_test.go @@ -45,7 +45,7 @@ func TestProvisionerPrepare_InlineShebang(t *testing.T) { t.Fatalf("should not have error: %s", err) } - if p.config.InlineShebang != "/bin/sh" { + if p.config.InlineShebang != "/bin/sh -e" { t.Fatalf("bad value: %s", p.config.InlineShebang) } From 25f5d6dba318d4894ca83e9fdc8a2fcd10273826 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 11 Jun 2015 16:29:15 -0500 Subject: [PATCH 299/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 207920b3a..b1bc247f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ BUG FIXES: * builder/amazon: Allow spaces in AMI names when using `clean_ami_name` [GH-2182] * builder/amazon: Remove deprecated ec2-upload-bundle paramger [GH-1931] * builder/amazon: Use IAM Profile to upload bundle if provided [GH-1985] + * builder/amazon: Use correct exit code after SSH authentication failed [GH-2004] * builder/amazon: Retry finding created instance for eventual consistency. 
[GH-2129] * builder/amazon: If no AZ is specified, use AZ chosen automatically by From 17555ff21ad8c00f03101dd25743b25e716606a7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 11 Jun 2015 17:38:25 -0400 Subject: [PATCH 300/956] update version for dev --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index f7e44a1d0..dbfc8da12 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.7.5" +const Version = "0.8.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From c903579aaacc66c01c893e4dad21480f1188f415 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 11 Jun 2015 23:43:36 -0400 Subject: [PATCH 301/956] builder/openstack-new --- builder/openstack-new/access_config.go | 109 ++++++++++++++ builder/openstack-new/artifact.go | 47 ++++++ builder/openstack-new/artifact_test.go | 35 +++++ builder/openstack-new/builder.go | 134 ++++++++++++++++++ builder/openstack-new/builder_test.go | 94 ++++++++++++ builder/openstack-new/image_config.go | 25 ++++ builder/openstack-new/image_config_test.go | 23 +++ builder/openstack-new/run_config.go | 75 ++++++++++ builder/openstack-new/run_config_test.go | 88 ++++++++++++ builder/openstack-new/server.go | 100 +++++++++++++ builder/openstack-new/ssh.go | 89 ++++++++++++ builder/openstack-new/step_allocate_ip.go | 94 ++++++++++++ builder/openstack-new/step_create_image.go | 82 +++++++++++ builder/openstack-new/step_key_pair.go | 106 ++++++++++++++ .../openstack-new/step_run_source_server.go | 110 ++++++++++++++ .../step_wait_for_rackconnect.go | 52 +++++++ plugin/builder-openstack-new/main.go | 15 ++ 17 files changed, 1278 insertions(+) create mode 100644 builder/openstack-new/access_config.go create mode 100644 builder/openstack-new/artifact.go create mode 100644 builder/openstack-new/artifact_test.go create mode 100644 builder/openstack-new/builder.go create mode 100644 builder/openstack-new/builder_test.go create mode 100644 builder/openstack-new/image_config.go create mode 100644 builder/openstack-new/image_config_test.go create mode 100644 builder/openstack-new/run_config.go create mode 100644 builder/openstack-new/run_config_test.go create mode 100644 builder/openstack-new/server.go create mode 100644 builder/openstack-new/ssh.go create mode 100644 builder/openstack-new/step_allocate_ip.go create mode 100644 builder/openstack-new/step_create_image.go create mode 100644 builder/openstack-new/step_key_pair.go create mode 100644 builder/openstack-new/step_run_source_server.go create mode 100644 builder/openstack-new/step_wait_for_rackconnect.go create mode 100644 plugin/builder-openstack-new/main.go diff --git a/builder/openstack-new/access_config.go b/builder/openstack-new/access_config.go new file mode 100644 index 000000000..e0f962c50 --- /dev/null +++ b/builder/openstack-new/access_config.go @@ -0,0 +1,109 @@ +package openstack + +import ( + "crypto/tls" + "fmt" + "net/http" + "os" + + "github.com/mitchellh/packer/template/interpolate" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" +) + +// AccessConfig is for common configuration related to openstack access +type AccessConfig struct { + Username string 
`mapstructure:"username"` + UserID string `mapstructure:"user_id"` + Password string `mapstructure:"password"` + APIKey string `mapstructure:"api_key"` + IdentityEndpoint string `mapstructure:"identity_endpoint"` + TenantID string `mapstructure:"tenant_id"` + TenantName string `mapstructure:"tenant_name"` + DomainID string `mapstructure:"domain_id"` + DomainName string `mapstructure:"domain_name"` + Insecure bool `mapstructure:"insecure"` + Region string `mapstructure:"region"` + EndpointType string `mapstructure:"endpoint_type"` + + osClient *gophercloud.ProviderClient +} + +func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { + if c.EndpointType != "internal" && c.EndpointType != "internalURL" && + c.EndpointType != "admin" && c.EndpointType != "adminURL" && + c.EndpointType != "public" && c.EndpointType != "publicURL" && + c.EndpointType != "" { + return []error{fmt.Errorf("Invalid endpoint type provided")} + } + + if c.Region == "" { + c.Region = os.Getenv("OS_REGION_NAME") + } + + // Get as much as possible from the end + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return []error{err} + } + + // Override values if we have them in our config + overrides := []struct { + From, To *string + }{ + {&c.Username, &ao.Username}, + {&c.UserID, &ao.UserID}, + {&c.Password, &ao.Password}, + {&c.APIKey, &ao.APIKey}, + {&c.IdentityEndpoint, &ao.IdentityEndpoint}, + {&c.TenantID, &ao.TenantID}, + {&c.TenantName, &ao.TenantName}, + {&c.DomainID, &ao.DomainID}, + {&c.DomainName, &ao.DomainName}, + } + for _, s := range overrides { + if *s.From != "" { + *s.To = *s.From + } + } + + // Build the client itself + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return []error{err} + } + + // If we have insecure set, then create a custom HTTP client that + // ignores SSL errors. + if c.Insecure { + config := &tls.Config{InsecureSkipVerify: true} + transport := &http.Transport{TLSClientConfig: config} + client.HTTPClient.Transport = transport + } + + // Auth + err = openstack.Authenticate(client, ao) + if err != nil { + return []error{err} + } + + c.osClient = client + return nil +} + +func (c *AccessConfig) computeV2Client() (*gophercloud.ServiceClient, error) { + return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ + Region: c.Region, + Availability: c.getEndpointType(), + }) +} + +func (c *AccessConfig) getEndpointType() gophercloud.Availability { + if c.EndpointType == "internal" || c.EndpointType == "internalURL" { + return gophercloud.AvailabilityInternal + } + if c.EndpointType == "admin" || c.EndpointType == "adminURL" { + return gophercloud.AvailabilityAdmin + } + return gophercloud.AvailabilityPublic +} diff --git a/builder/openstack-new/artifact.go b/builder/openstack-new/artifact.go new file mode 100644 index 000000000..aa60d2641 --- /dev/null +++ b/builder/openstack-new/artifact.go @@ -0,0 +1,47 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" +) + +// Artifact is an artifact implementation that contains built images. +type Artifact struct { + // ImageId of built image + ImageId string + + // BuilderId is the unique ID for the builder that created this image + BuilderIdValue string + + // OpenStack connection for performing API stuff. 
+ Client *gophercloud.ServiceClient +} + +func (a *Artifact) BuilderId() string { + return a.BuilderIdValue +} + +func (*Artifact) Files() []string { + // We have no files + return nil +} + +func (a *Artifact) Id() string { + return a.ImageId +} + +func (a *Artifact) String() string { + return fmt.Sprintf("An image was created: %v", a.ImageId) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + log.Printf("Destroying image: %s", a.ImageId) + return images.Delete(a.Client, a.ImageId).ExtractErr() +} diff --git a/builder/openstack-new/artifact_test.go b/builder/openstack-new/artifact_test.go new file mode 100644 index 000000000..313fea7cf --- /dev/null +++ b/builder/openstack-new/artifact_test.go @@ -0,0 +1,35 @@ +package openstack + +import ( + "github.com/mitchellh/packer/packer" + "testing" +) + +func TestArtifact_Impl(t *testing.T) { + var _ packer.Artifact = new(Artifact) +} + +func TestArtifactId(t *testing.T) { + expected := `b8cdf55b-c916-40bd-b190-389ec144c4ed` + + a := &Artifact{ + ImageId: "b8cdf55b-c916-40bd-b190-389ec144c4ed", + } + + result := a.Id() + if result != expected { + t.Fatalf("bad: %s", result) + } +} + +func TestArtifactString(t *testing.T) { + expected := "An image was created: b8cdf55b-c916-40bd-b190-389ec144c4ed" + + a := &Artifact{ + ImageId: "b8cdf55b-c916-40bd-b190-389ec144c4ed", + } + result := a.String() + if result != expected { + t.Fatalf("bad: %s", result) + } +} diff --git a/builder/openstack-new/builder.go b/builder/openstack-new/builder.go new file mode 100644 index 000000000..bebb28452 --- /dev/null +++ b/builder/openstack-new/builder.go @@ -0,0 +1,134 @@ +// The openstack package contains a packer.Builder implementation that +// builds Images for openstack. + +package openstack + +import ( + "fmt" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common" + "log" + + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +// The unique ID for this builder +const BuilderId = "mitchellh.openstack" + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + AccessConfig `mapstructure:",squash"` + ImageConfig `mapstructure:",squash"` + RunConfig `mapstructure:",squash"` + + ctx interpolate.Context +} + +type Builder struct { + config Config + runner multistep.Runner +} + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + }, raws...) + if err != nil { + return nil, err + } + + // Accumulate any errors + var errs *packer.MultiError + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) 
+ + if errs != nil && len(errs.Errors) > 0 { + return nil, errs + } + + log.Println(common.ScrubConfig(b.config, b.config.Password)) + return nil, nil +} + +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + computeClient, err := b.config.computeV2Client() + if err != nil { + return nil, fmt.Errorf("Error initializing compute client: %s", err) + } + + // Setup the state bag and initial state for the steps + state := new(multistep.BasicStateBag) + state.Put("config", b.config) + state.Put("hook", hook) + state.Put("ui", ui) + + // Build the steps + steps := []multistep.Step{ + &StepKeyPair{ + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), + }, + &StepRunSourceServer{ + Name: b.config.ImageName, + Flavor: b.config.Flavor, + SourceImage: b.config.SourceImage, + SecurityGroups: b.config.SecurityGroups, + Networks: b.config.Networks, + }, + &StepWaitForRackConnect{ + Wait: b.config.RackconnectWait, + }, + &StepAllocateIp{ + FloatingIpPool: b.config.FloatingIpPool, + FloatingIp: b.config.FloatingIp, + }, + &common.StepConnectSSH{ + SSHAddress: SSHAddress(computeClient, b.config.SSHInterface, b.config.SSHPort), + SSHConfig: SSHConfig(b.config.SSHUsername), + SSHWaitTimeout: b.config.SSHTimeout(), + }, + &common.StepProvision{}, + &stepCreateImage{}, + } + + // Run! + if b.config.PackerDebug { + b.runner = &multistep.DebugRunner{ + Steps: steps, + PauseFn: common.MultistepDebugFn(ui), + } + } else { + b.runner = &multistep.BasicRunner{Steps: steps} + } + + b.runner.Run(state) + + // If there was an error, return that + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + // If there are no images, then just return + if _, ok := state.GetOk("image"); !ok { + return nil, nil + } + + // Build the artifact and return it + artifact := &Artifact{ + ImageId: state.Get("image").(string), + BuilderIdValue: BuilderId, + Client: computeClient, + } + + return artifact, nil +} + +func (b *Builder) Cancel() { + if b.runner != nil { + log.Println("Cancelling the step runner...") + b.runner.Cancel() + } +} diff --git a/builder/openstack-new/builder_test.go b/builder/openstack-new/builder_test.go new file mode 100644 index 000000000..badf9784d --- /dev/null +++ b/builder/openstack-new/builder_test.go @@ -0,0 +1,94 @@ +package openstack + +import ( + "github.com/mitchellh/packer/packer" + "testing" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "username": "foo", + "password": "bar", + "provider": "foo", + "region": "DFW", + "image_name": "foo", + "source_image": "foo", + "flavor": "foo", + "ssh_username": "root", + } +} + +func TestBuilder_ImplementsBuilder(t *testing.T) { + var raw interface{} + raw = &Builder{} + if _, ok := raw.(packer.Builder); !ok { + t.Fatalf("Builder should be a builder") + } +} + +func TestBuilder_Prepare_BadType(t *testing.T) { + b := &Builder{} + c := map[string]interface{}{ + "password": []string{}, + } + + warns, err := b.Prepare(c) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatalf("prepare should fail") + } +} + +func TestBuilderPrepare_ImageName(t *testing.T) { + var b Builder + config := testConfig() + + // Test good + config["image_name"] = "foo" + warns, err := b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + // Test bad + config["image_name"] = "foo {{" + b = Builder{} + warns, err = 
b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test bad + delete(config, "image_name") + b = Builder{} + warns, err = b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } +} + +func TestBuilderPrepare_InvalidKey(t *testing.T) { + var b Builder + config := testConfig() + + // Add a random key + config["i_should_not_be_valid"] = true + warns, err := b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } +} diff --git a/builder/openstack-new/image_config.go b/builder/openstack-new/image_config.go new file mode 100644 index 000000000..124449eab --- /dev/null +++ b/builder/openstack-new/image_config.go @@ -0,0 +1,25 @@ +package openstack + +import ( + "fmt" + + "github.com/mitchellh/packer/template/interpolate" +) + +// ImageConfig is for common configuration related to creating Images. +type ImageConfig struct { + ImageName string `mapstructure:"image_name"` +} + +func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error { + errs := make([]error, 0) + if c.ImageName == "" { + errs = append(errs, fmt.Errorf("An image_name must be specified")) + } + + if len(errs) > 0 { + return errs + } + + return nil +} diff --git a/builder/openstack-new/image_config_test.go b/builder/openstack-new/image_config_test.go new file mode 100644 index 000000000..4d81ecd94 --- /dev/null +++ b/builder/openstack-new/image_config_test.go @@ -0,0 +1,23 @@ +package openstack + +import ( + "testing" +) + +func testImageConfig() *ImageConfig { + return &ImageConfig{ + ImageName: "foo", + } +} + +func TestImageConfigPrepare_Region(t *testing.T) { + c := testImageConfig() + if err := c.Prepare(nil); err != nil { + t.Fatalf("shouldn't have err: %s", err) + } + + c.ImageName = "" + if err := c.Prepare(nil); err == nil { + t.Fatal("should have error") + } +} diff --git a/builder/openstack-new/run_config.go b/builder/openstack-new/run_config.go new file mode 100644 index 000000000..e5d73c9c1 --- /dev/null +++ b/builder/openstack-new/run_config.go @@ -0,0 +1,75 @@ +package openstack + +import ( + "errors" + "fmt" + "time" + + "github.com/mitchellh/packer/template/interpolate" +) + +// RunConfig contains configuration for running an instance from a source +// image and details on how to access that launched image. 
+type RunConfig struct { + SourceImage string `mapstructure:"source_image"` + Flavor string `mapstructure:"flavor"` + RawSSHTimeout string `mapstructure:"ssh_timeout"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPort int `mapstructure:"ssh_port"` + SSHInterface string `mapstructure:"ssh_interface"` + OpenstackProvider string `mapstructure:"openstack_provider"` + UseFloatingIp bool `mapstructure:"use_floating_ip"` + RackconnectWait bool `mapstructure:"rackconnect_wait"` + FloatingIpPool string `mapstructure:"floating_ip_pool"` + FloatingIp string `mapstructure:"floating_ip"` + SecurityGroups []string `mapstructure:"security_groups"` + Networks []string `mapstructure:"networks"` + + // Unexported fields that are calculated from others + sshTimeout time.Duration +} + +func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { + // Defaults + if c.SSHUsername == "" { + c.SSHUsername = "root" + } + + if c.SSHPort == 0 { + c.SSHPort = 22 + } + + if c.RawSSHTimeout == "" { + c.RawSSHTimeout = "5m" + } + + if c.UseFloatingIp && c.FloatingIpPool == "" { + c.FloatingIpPool = "public" + } + + // Validation + var err error + errs := make([]error, 0) + if c.SourceImage == "" { + errs = append(errs, errors.New("A source_image must be specified")) + } + + if c.Flavor == "" { + errs = append(errs, errors.New("A flavor must be specified")) + } + + if c.SSHUsername == "" { + errs = append(errs, errors.New("An ssh_username must be specified")) + } + + c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) + if err != nil { + errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) + } + + return errs +} + +func (c *RunConfig) SSHTimeout() time.Duration { + return c.sshTimeout +} diff --git a/builder/openstack-new/run_config_test.go b/builder/openstack-new/run_config_test.go new file mode 100644 index 000000000..16b89b352 --- /dev/null +++ b/builder/openstack-new/run_config_test.go @@ -0,0 +1,88 @@ +package openstack + +import ( + "os" + "testing" +) + +func init() { + // Clear out the openstack env vars so they don't + // affect our tests. 
+ os.Setenv("SDK_USERNAME", "") + os.Setenv("SDK_PASSWORD", "") + os.Setenv("SDK_PROVIDER", "") +} + +func testRunConfig() *RunConfig { + return &RunConfig{ + SourceImage: "abcd", + Flavor: "m1.small", + SSHUsername: "root", + } +} + +func TestRunConfigPrepare(t *testing.T) { + c := testRunConfig() + err := c.Prepare(nil) + if len(err) > 0 { + t.Fatalf("err: %s", err) + } +} + +func TestRunConfigPrepare_InstanceType(t *testing.T) { + c := testRunConfig() + c.Flavor = "" + if err := c.Prepare(nil); len(err) != 1 { + t.Fatalf("err: %s", err) + } +} + +func TestRunConfigPrepare_SourceImage(t *testing.T) { + c := testRunConfig() + c.SourceImage = "" + if err := c.Prepare(nil); len(err) != 1 { + t.Fatalf("err: %s", err) + } +} + +func TestRunConfigPrepare_SSHPort(t *testing.T) { + c := testRunConfig() + c.SSHPort = 0 + if err := c.Prepare(nil); len(err) != 0 { + t.Fatalf("err: %s", err) + } + + if c.SSHPort != 22 { + t.Fatalf("invalid value: %d", c.SSHPort) + } + + c.SSHPort = 44 + if err := c.Prepare(nil); len(err) != 0 { + t.Fatalf("err: %s", err) + } + + if c.SSHPort != 44 { + t.Fatalf("invalid value: %d", c.SSHPort) + } +} + +func TestRunConfigPrepare_SSHTimeout(t *testing.T) { + c := testRunConfig() + c.RawSSHTimeout = "" + if err := c.Prepare(nil); len(err) != 0 { + t.Fatalf("err: %s", err) + } + + c.RawSSHTimeout = "bad" + if err := c.Prepare(nil); len(err) != 1 { + t.Fatalf("err: %s", err) + } +} + +func TestRunConfigPrepare_SSHUsername(t *testing.T) { + c := testRunConfig() + c.SSHUsername = "" + if err := c.Prepare(nil); len(err) != 0 { + t.Fatalf("err: %s", err) + } +} diff --git a/builder/openstack-new/server.go b/builder/openstack-new/server.go new file mode 100644 index 000000000..a87ef0110 --- /dev/null +++ b/builder/openstack-new/server.go @@ -0,0 +1,100 @@ +package openstack + +import ( + "errors" + "fmt" + "log" + "time" + + "github.com/mitchellh/multistep" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an openstack instance after +// refreshing it. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, progress int, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +type StateChangeConf struct { + Pending []string + Refresh StateRefreshFunc + StepState multistep.StateBag + Target string +} + +// ServerStateRefreshFunc returns a StateRefreshFunc that is used to watch +// an openstack server. 
+func ServerStateRefreshFunc(
+	client *gophercloud.ServiceClient, s *servers.Server) StateRefreshFunc {
+	return func() (interface{}, string, int, error) {
+		var serverNew *servers.Server
+		result := servers.Get(client, s.ID)
+		err := result.Err
+		if err == nil {
+			serverNew, err = result.Extract()
+		}
+		if result.Err != nil {
+			errCode, ok := result.Err.(*gophercloud.UnexpectedResponseCodeError)
+			if ok && errCode.Actual == 404 {
+				log.Printf("[INFO] 404 on ServerStateRefresh, returning DELETED")
+				return nil, "DELETED", 0, nil
+			} else {
+				log.Printf("[ERROR] Error on ServerStateRefresh: %s", result.Err)
+				return nil, "", 0, result.Err
+			}
+		}
+
+		return serverNew, serverNew.Status, serverNew.Progress, nil
+	}
+}
+
+// WaitForState watches an object and waits for it to achieve a certain
+// state.
+func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
+	log.Printf("Waiting for state to become: %s", conf.Target)
+
+	for {
+		var currentProgress int
+		var currentState string
+		i, currentState, currentProgress, err = conf.Refresh()
+		if err != nil {
+			return
+		}
+
+		if currentState == conf.Target {
+			return
+		}
+
+		if conf.StepState != nil {
+			if _, ok := conf.StepState.GetOk(multistep.StateCancelled); ok {
+				return nil, errors.New("interrupted")
+			}
+		}
+
+		found := false
+		for _, allowed := range conf.Pending {
+			if currentState == allowed {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			return nil, fmt.Errorf("unexpected state '%s', wanted target '%s'", currentState, conf.Target)
+		}
+
+		log.Printf("Waiting for state to become: %s currently %s (%d%%)", conf.Target, currentState, currentProgress)
+		time.Sleep(2 * time.Second)
+	}
+
+	return
+}
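As a usage illustration, a hypothetical wrapper assumed to sit alongside server.go in this package; the client, server, and state bag come from earlier steps, exactly as in StepRunSourceServer below:

// waitForActive shows the intended call pattern for the poller above:
// pair StateChangeConf with ServerStateRefreshFunc and block until the
// just-launched server leaves BUILD for ACTIVE.
func waitForActive(computeClient *gophercloud.ServiceClient,
	server *servers.Server, state multistep.StateBag) (*servers.Server, error) {
	stateChange := StateChangeConf{
		Pending:   []string{"BUILD"},
		Target:    "ACTIVE",
		Refresh:   ServerStateRefreshFunc(computeClient, server),
		StepState: state,
	}
	raw, err := WaitForState(&stateChange)
	if err != nil {
		return nil, err
	}
	return raw.(*servers.Server), nil
}

diff --git a/builder/openstack-new/ssh.go b/builder/openstack-new/ssh.go
new file mode 100644
index 000000000..a3de654f6
--- /dev/null
+++ b/builder/openstack-new/ssh.go
@@ -0,0 +1,89 @@
+package openstack
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/mitchellh/multistep"
+	"github.com/rackspace/gophercloud"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	"golang.org/x/crypto/ssh"
+)
+
+// SSHAddress returns a function that can be given to the SSH communicator
+// for determining the SSH address based on the server AccessIPv4 setting.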
+func SSHAddress( + client *gophercloud.ServiceClient, + sshinterface string, port int) func(multistep.StateBag) (string, error) { + return func(state multistep.StateBag) (string, error) { + s := state.Get("server").(*servers.Server) + + // If we have a floating IP, use that + if ip := state.Get("access_ip").(*floatingip.FloatingIP); ip.FixedIP != "" { + return fmt.Sprintf("%s:%d", ip.FixedIP, port), nil + } + + if s.AccessIPv4 != "" { + return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil + } + + // Get all the addresses associated with this server + /* + ip_pools, err := s.AllAddressPools() + if err != nil { + return "", errors.New("Error parsing SSH addresses") + } + for pool, addresses := range ip_pools { + if sshinterface != "" { + if pool != sshinterface { + continue + } + } + if pool != "" { + for _, address := range addresses { + if address.Addr != "" && address.Version == 4 { + return fmt.Sprintf("%s:%d", address.Addr, port), nil + } + } + } + } + */ + + result := servers.Get(client, s.ID) + err := result.Err + if err == nil { + s, err = result.Extract() + } + if err != nil { + return "", err + } + + state.Put("server", s) + time.Sleep(1 * time.Second) + + return "", errors.New("couldn't determine IP address for server") + } +} + +// SSHConfig returns a function that can be used for the SSH communicator +// config for connecting to the instance created over SSH using the generated +// private key. +func SSHConfig(username string) func(multistep.StateBag) (*ssh.ClientConfig, error) { + return func(state multistep.StateBag) (*ssh.ClientConfig, error) { + privateKey := state.Get("privateKey").(string) + + signer, err := ssh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + return &ssh.ClientConfig{ + User: username, + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(signer), + }, + }, nil + } +} diff --git a/builder/openstack-new/step_allocate_ip.go b/builder/openstack-new/step_allocate_ip.go new file mode 100644 index 000000000..adb15eb5b --- /dev/null +++ b/builder/openstack-new/step_allocate_ip.go @@ -0,0 +1,94 @@ +package openstack + +import ( + "fmt" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +type StepAllocateIp struct { + FloatingIpPool string + FloatingIp string +} + +func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + config := state.Get("config").(Config) + server := state.Get("server").(*servers.Server) + + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + var instanceIp *floatingip.FloatingIP + // This is here in case we error out before putting instanceIp into the + // statebag below, because it is requested by Cleanup() + state.Put("access_ip", instanceIp) + + if s.FloatingIp != "" { + instanceIp.FixedIP = s.FloatingIp + } else if s.FloatingIpPool != "" { + newIp, err := floatingip.Create(client, floatingip.CreateOpts{ + Pool: s.FloatingIpPool, + }).Extract() + if err != nil { + err := fmt.Errorf("Error creating floating ip from pool '%s'", s.FloatingIpPool) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + instanceIp = newIp + 
ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.FixedIP)) + } + + if instanceIp.FixedIP != "" { + err := floatingip.Associate(client, server.ID, instanceIp.FixedIP).ExtractErr() + if err != nil { + err := fmt.Errorf( + "Error associating floating IP %s with instance.", + instanceIp.FixedIP) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + ui.Say(fmt.Sprintf( + "Added floating IP %s to instance...", instanceIp.FixedIP)) + } + + state.Put("access_ip", instanceIp) + + return multistep.ActionContinue +} + +func (s *StepAllocateIp) Cleanup(state multistep.StateBag) { + config := state.Get("config").(Config) + ui := state.Get("ui").(packer.Ui) + instanceIp := state.Get("access_ip").(*floatingip.FloatingIP) + + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf( + "Error deleting temporary floating IP %s", instanceIp.FixedIP)) + return + } + + if s.FloatingIpPool != "" && instanceIp.ID != "" { + if err := floatingip.Delete(client, instanceIp.ID).ExtractErr(); err != nil { + ui.Error(fmt.Sprintf( + "Error deleting temporary floating IP %s", instanceIp.FixedIP)) + return + } + + ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.FixedIP)) + } +} diff --git a/builder/openstack-new/step_create_image.go b/builder/openstack-new/step_create_image.go new file mode 100644 index 000000000..df540e311 --- /dev/null +++ b/builder/openstack-new/step_create_image.go @@ -0,0 +1,82 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +type stepCreateImage struct{} + +func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(Config) + server := state.Get("server").(*servers.Server) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + // Create the image + ui.Say(fmt.Sprintf("Creating the image: %s", config.ImageName)) + imageId, err := servers.CreateImage(client, server.ID, servers.CreateImageOpts{ + Name: config.ImageName, + }).ExtractImageID() + if err != nil { + err := fmt.Errorf("Error creating image: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Set the Image ID in the state + ui.Say(fmt.Sprintf("Image: %s", imageId)) + state.Put("image", imageId) + + // Wait for the image to become ready + ui.Say("Waiting for image to become ready...") + if err := WaitForImage(client, imageId); err != nil { + err := fmt.Errorf("Error waiting for image: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *stepCreateImage) Cleanup(multistep.StateBag) { + // No cleanup... +} + +// WaitForImage waits for the given Image ID to become ready. 
+func WaitForImage(client *gophercloud.ServiceClient, imageId string) error { + for { + var image *images.Image + result := images.Get(client, imageId) + err := result.Err + if err == nil { + image, err = result.Extract() + } + if err != nil { + return err + } + + if image.Status == "ACTIVE" { + return nil + } + + log.Printf("Waiting for image creation status: %s (%d%%)", image.Status, image.Progress) + time.Sleep(2 * time.Second) + } +} diff --git a/builder/openstack-new/step_key_pair.go b/builder/openstack-new/step_key_pair.go new file mode 100644 index 000000000..06bcbf9ea --- /dev/null +++ b/builder/openstack-new/step_key_pair.go @@ -0,0 +1,106 @@ +package openstack + +import ( + "fmt" + "os" + "runtime" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" +) + +type StepKeyPair struct { + Debug bool + DebugKeyPath string + keyName string +} + +func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(Config) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say("Creating temporary keypair for this instance...") + keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID()) + keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{ + Name: keyName, + }).Extract() + if err != nil { + state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) + return multistep.ActionHalt + } + + if keypair.PrivateKey == "" { + state.Put("error", fmt.Errorf("The temporary keypair returned was blank")) + return multistep.ActionHalt + } + + // If we're in debug mode, output the private key to the working + // directory. + if s.Debug { + ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) + f, err := os.Create(s.DebugKeyPath) + if err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + defer f.Close() + + // Write the key out + if _, err := f.Write([]byte(keypair.PrivateKey)); err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + + // Chmod it so that it is SSH ready + if runtime.GOOS != "windows" { + if err := f.Chmod(0600); err != nil { + state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err)) + return multistep.ActionHalt + } + } + } + + // Set the keyname so we know to delete it later + s.keyName = keyName + + // Set some state data for use in future steps + state.Put("keyPair", keyName) + state.Put("privateKey", keypair.PrivateKey) + + return multistep.ActionContinue +} + +func (s *StepKeyPair) Cleanup(state multistep.StateBag) { + // If no key name is set, then we never created it, so just return + if s.keyName == "" { + return + } + + config := state.Get("config").(Config) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) + return + } + + ui.Say("Deleting temporary keypair...") + err = keypairs.Delete(computeClient, s.keyName).ExtractErr() + if err != nil { + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) + } +} diff --git a/builder/openstack-new/step_run_source_server.go b/builder/openstack-new/step_run_source_server.go new file mode 100644 index 000000000..e58e2c46b --- /dev/null +++ b/builder/openstack-new/step_run_source_server.go @@ -0,0 +1,110 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" +) + +type StepRunSourceServer struct { + Flavor string + Name string + SourceImage string + SecurityGroups []string + Networks []string + + server *servers.Server +} + +func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(Config) + keyName := state.Get("keyPair").(string) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + networks := make([]servers.Network, len(s.Networks)) + for i, networkUuid := range s.Networks { + networks[i].UUID = networkUuid + } + + s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{ + CreateOptsBuilder: servers.CreateOpts{ + Name: s.Name, + ImageRef: s.SourceImage, + FlavorRef: s.Flavor, + SecurityGroups: s.SecurityGroups, + Networks: networks, + }, + + KeyName: keyName, + }).Extract() + if err != nil { + err := fmt.Errorf("Error launching source server: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("server id: %s", s.server.ID) + + ui.Say(fmt.Sprintf("Waiting for server (%s) to become ready...", s.server.ID)) + stateChange := StateChangeConf{ + Pending: []string{"BUILD"}, + Target: "ACTIVE", + Refresh: ServerStateRefreshFunc(computeClient, s.server), + StepState: state, + } + latestServer, err := WaitForState(&stateChange) + if err != nil { + err := fmt.Errorf("Error waiting for server (%s) to become ready: %s", s.server.ID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + s.server = latestServer.(*servers.Server) + state.Put("server", s.server) + + return multistep.ActionContinue +} + +func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) { + if s.server == nil { + return + } + + config := state.Get("config").(Config) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) + return + } + + ui.Say("Terminating the source server...") + if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil { + ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) + return + } + + stateChange := StateChangeConf{ + Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED"}, + Refresh: ServerStateRefreshFunc(computeClient, s.server), + Target: "DELETED", + } + + WaitForState(&stateChange) +} diff --git a/builder/openstack-new/step_wait_for_rackconnect.go 
new file mode 100644
index 000000000..6263bd17d
--- /dev/null
+++ b/builder/openstack-new/step_wait_for_rackconnect.go
@@ -0,0 +1,52 @@
+package openstack
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+)
+
+type StepWaitForRackConnect struct {
+	Wait bool
+}
+
+func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAction {
+	if !s.Wait {
+		return multistep.ActionContinue
+	}
+
+	config := state.Get("config").(Config)
+	server := state.Get("server").(*servers.Server)
+	ui := state.Get("ui").(packer.Ui)
+
+	// We need the v2 compute client
+	computeClient, err := config.computeV2Client()
+	if err != nil {
+		err = fmt.Errorf("Error initializing compute client: %s", err)
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	ui.Say(fmt.Sprintf(
+		"Waiting for server (%s) to become RackConnect ready...", server.ID))
+	for {
+		server, err = servers.Get(computeClient, server.ID).Extract()
+		if err != nil {
+			return multistep.ActionHalt
+		}
+
+		if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" {
+			break
+		}
+
+		time.Sleep(2 * time.Second)
+	}
+
+	return multistep.ActionContinue
+}
+
+func (s *StepWaitForRackConnect) Cleanup(state multistep.StateBag) {
+}
diff --git a/plugin/builder-openstack-new/main.go b/plugin/builder-openstack-new/main.go
new file mode 100644
index 000000000..d8075c78d
--- /dev/null
+++ b/plugin/builder-openstack-new/main.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+	"github.com/mitchellh/packer/builder/openstack-new"
+	"github.com/mitchellh/packer/packer/plugin"
+)
+
+func main() {
+	server, err := plugin.Server()
+	if err != nil {
+		panic(err)
+	}
+	server.RegisterBuilder(new(openstack.Builder))
+	server.Serve()
+}
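Every step added by this patch follows the same two-method contract from the multistep package: Run performs one unit of work and reports whether the build should continue, while Cleanup is invoked as the runner unwinds, even when a later step halts. A hypothetical skeleton (doWork is a made-up placeholder, not a function from the patch):

// Hedged skeleton of a multistep step, mirroring the steps above.
type stepExample struct{}

func (s *stepExample) Run(state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	ui.Say("Doing one unit of work...")
	if err := doWork(); err != nil { // doWork is a placeholder
		state.Put("error", err) // later code reads this key to report failure
		return multistep.ActionHalt
	}
	return multistep.ActionContinue
}

// Cleanup releases anything Run created; the runner calls it while
// tearing the steps back down.
func (s *stepExample) Cleanup(state multistep.StateBag) {}

From 551e80774d0b0f28547a5373a59821fbf0cfeaf8 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 00:03:17 -0400
Subject: [PATCH 302/956] builder/openstack-new: fix some issues

---
 builder/openstack-new/ssh.go                  | 53 ++++++++++---------
 builder/openstack-new/step_allocate_ip.go     |  7 ++-
 .../openstack-new/step_run_source_server.go   |  2 +-
 3 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/builder/openstack-new/ssh.go b/builder/openstack-new/ssh.go
index a3de654f6..7b0510f98 100644
--- a/builder/openstack-new/ssh.go
+++ b/builder/openstack-new/ssh.go
@@ -3,6 +3,7 @@ package openstack
 import (
 	"errors"
 	"fmt"
+	"log"
 	"time"
 
 	"github.com/mitchellh/multistep"
@@ -21,7 +22,8 @@ func SSHAddress(
 		s := state.Get("server").(*servers.Server)
 
 		// If we have a floating IP, use that
-		if ip := state.Get("access_ip").(*floatingip.FloatingIP); ip.FixedIP != "" {
+		ip := state.Get("access_ip").(*floatingip.FloatingIP)
+		if ip != nil && ip.FixedIP != "" {
 			return fmt.Sprintf("%s:%d", ip.FixedIP, port), nil
 		}
 
@@ -29,33 +31,34 @@
 			return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil
 		}
 
-		// Get all the addresses associated with this server
-		/*
-			ip_pools, err := s.AllAddressPools()
-			if err != nil {
-				return "", errors.New("Error parsing SSH addresses")
-			}
-			for pool, addresses := range ip_pools {
-				if sshinterface != "" {
-					if pool != sshinterface {
-						continue
-					}
-				}
-				if pool != "" {
-					for _, address := range addresses {
-						if address.Addr != "" && address.Version == 4 {
-							return fmt.Sprintf("%s:%d", address.Addr, port), nil
-						}
-					}
-				}
-			}
-		*/
-		result := servers.Get(client, s.ID)
-		err := result.Err
-		if err == nil {
-			s, err = result.Extract()
-		}
+		// Get all the addresses associated with this server. This
+		// was taken directly from Terraform.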
+ for _, networkAddresses := range s.Addresses { + elements, ok := networkAddresses.([]interface{}) + if !ok { + log.Printf( + "[ERROR] Unknown return type for address field: %#v", + networkAddresses) + continue } - for pool, addresses := range ip_pools { - if sshinterface != "" { - if pool != sshinterface { - continue - } - } - if pool != "" { - for _, address := range addresses { - if address.Addr != "" && address.Version == 4 { - return fmt.Sprintf("%s:%d", address.Addr, port), nil - } - } - } - } - */ - result := servers.Get(client, s.ID) - err := result.Err - if err == nil { - s, err = result.Extract() + for _, element := range elements { + var addr string + address := element.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "floating" { + addr = address["addr"].(string) + } else { + if address["version"].(float64) == 4 { + addr = address["addr"].(string) + } + } + if addr != "" { + return fmt.Sprintf("%s:%d", addr, port), nil + } + } } + + s, err := servers.Get(client, s.ID).Extract() if err != nil { return "", err } diff --git a/builder/openstack-new/step_allocate_ip.go b/builder/openstack-new/step_allocate_ip.go index adb15eb5b..16efe8d38 100644 --- a/builder/openstack-new/step_allocate_ip.go +++ b/builder/openstack-new/step_allocate_ip.go @@ -33,7 +33,7 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { state.Put("access_ip", instanceIp) if s.FloatingIp != "" { - instanceIp.FixedIP = s.FloatingIp + *instanceIp = floatingip.FloatingIP{FixedIP: s.FloatingIp} } else if s.FloatingIpPool != "" { newIp, err := floatingip.Create(client, floatingip.CreateOpts{ Pool: s.FloatingIpPool, @@ -45,11 +45,11 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - instanceIp = newIp + *instanceIp = *newIp ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.FixedIP)) } - if instanceIp.FixedIP != "" { + if instanceIp != nil && instanceIp.FixedIP != "" { err := floatingip.Associate(client, server.ID, instanceIp.FixedIP).ExtractErr() if err != nil { err := fmt.Errorf( @@ -65,7 +65,6 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { } state.Put("access_ip", instanceIp) - return multistep.ActionContinue } diff --git a/builder/openstack-new/step_run_source_server.go b/builder/openstack-new/step_run_source_server.go index e58e2c46b..4432d5860 100644 --- a/builder/openstack-new/step_run_source_server.go +++ b/builder/openstack-new/step_run_source_server.go @@ -42,7 +42,7 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction CreateOptsBuilder: servers.CreateOpts{ Name: s.Name, ImageRef: s.SourceImage, - FlavorRef: s.Flavor, + FlavorName: s.Flavor, SecurityGroups: s.SecurityGroups, Networks: networks, }, From 7a46b80cfb24aa307286a2b67c253f7716b3d3fc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 00:05:24 -0400 Subject: [PATCH 303/956] builder/openstack-new: better UI --- builder/openstack-new/step_create_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/openstack-new/step_create_image.go b/builder/openstack-new/step_create_image.go index df540e311..989db81e5 100644 --- a/builder/openstack-new/step_create_image.go +++ b/builder/openstack-new/step_create_image.go @@ -40,7 +40,7 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { } // Set the Image ID in the state - ui.Say(fmt.Sprintf("Image: %s", imageId)) + ui.Message(fmt.Sprintf("Image: %s", 
imageId))
 	state.Put("image", imageId)
 
 	// Wait for the image to become ready
From 46f518f21df6a127d3e9e64eabfdc31441aa4a1c Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 00:09:01 -0400
Subject: [PATCH 304/956] builder/openstack: proper error extraction

---
 builder/openstack-new/server.go            | 15 +++++----------
 builder/openstack-new/step_create_image.go | 14 ++++++++------
 2 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/builder/openstack-new/server.go b/builder/openstack-new/server.go
index a87ef0110..de8c9d103 100644
--- a/builder/openstack-new/server.go
+++ b/builder/openstack-new/server.go
@@ -36,20 +36,15 @@ type StateChangeConf struct {
 func ServerStateRefreshFunc(
 	client *gophercloud.ServiceClient, s *servers.Server) StateRefreshFunc {
 	return func() (interface{}, string, int, error) {
-		var serverNew *servers.Server
-		result := servers.Get(client, s.ID)
-		err := result.Err
-		if err == nil {
-			serverNew, err = result.Extract()
-		}
-		if result.Err != nil {
-			errCode, ok := result.Err.(*gophercloud.UnexpectedResponseCodeError)
+		serverNew, err := servers.Get(client, s.ID).Extract()
+		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
 			if ok && errCode.Actual == 404 {
 				log.Printf("[INFO] 404 on ServerStateRefresh, returning DELETED")
 				return nil, "DELETED", 0, nil
 			} else {
 				log.Printf("[ERROR] Error on ServerStateRefresh: %s", err)
 				return nil, "", 0, err
 			}
 		}
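The pattern this commit adopts, calling .Extract() directly and type-asserting the error to *gophercloud.UnexpectedResponseCodeError, is what the subject line means by proper error extraction: gophercloud surfaces unexpected HTTP status codes as that error type, with the status in its Actual field. A small, hypothetical helper (not part of the patch) makes the branch reusable:

// Hedged sketch only: wraps the type assertion the hunks in this
// commit perform inline.
func isStatus(err error, code int) bool {
	respErr, ok := err.(*gophercloud.UnexpectedResponseCodeError)
	return ok && respErr.Actual == code
}

With it, the 404 branch above reduces to if isStatus(err, 404) { ... }, and the 500 retry in the next hunk follows the same shape.

diff --git a/builder/openstack-new/step_create_image.go b/builder/openstack-new/step_create_image.go
index 989db81e5..b777e8b0b 100644
--- a/builder/openstack-new/step_create_image.go
+++ b/builder/openstack-new/step_create_image.go
@@ -62,13 +62,15 @@ func (s *stepCreateImage) Cleanup(multistep.StateBag) {
 // WaitForImage waits for the given Image ID to become ready.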
 func WaitForImage(client *gophercloud.ServiceClient, imageId string) error {
 	for {
-		var image *images.Image
-		result := images.Get(client, imageId)
-		err := result.Err
-		if err == nil {
-			image, err = result.Extract()
-		}
+		image, err := images.Get(client, imageId).Extract()
 		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if ok && errCode.Actual == 500 {
+				log.Printf("[ERROR] 500 error received, will ignore and retry: %s", err)
+				time.Sleep(2 * time.Second)
+				continue
+			}
+
 			return err
 		}
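The retry added above treats a 500 from the image API as transient, but it retries forever, so a persistently failing endpoint can hang the build. A hedged variant (sketch only; waitForImageCapped and maxErrors are invented names, not from the patch) bounds the number of consecutive 500s tolerated:

// Sketch, not the patch's code: same polling loop, but give up after
// maxErrors consecutive 500 responses instead of retrying forever.
func waitForImageCapped(client *gophercloud.ServiceClient, imageId string, maxErrors int) error {
	seen := 0
	for {
		image, err := images.Get(client, imageId).Extract()
		if err != nil {
			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
			if ok && errCode.Actual == 500 && seen < maxErrors {
				seen++
				time.Sleep(2 * time.Second)
				continue
			}
			return err
		}
		if image.Status == "ACTIVE" {
			return nil
		}
		seen = 0 // a successful poll resets the error streak
		time.Sleep(2 * time.Second)
	}
}

From a0d41fcd14f2525df351d1764e3401d1f016ff53 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 00:16:43 -0400
Subject: [PATCH 305/956] builder/openstack

---
 builder/openstack-new/access_config.go        | 109 ------------
 builder/openstack-new/artifact.go             |  47 -----
 builder/openstack-new/artifact_test.go        |  35 ----
 builder/openstack-new/builder.go              | 134 --------------
 builder/openstack-new/builder_test.go         |  94 ----------
 builder/openstack-new/image_config.go         |  25 ---
 builder/openstack-new/image_config_test.go    |  23 ---
 builder/openstack-new/run_config.go           |  75 --------
 builder/openstack-new/run_config_test.go      |  88 ----------
 builder/openstack-new/server.go               |  95 ----------
 builder/openstack-new/ssh.go                  |  92 ----------
 builder/openstack-new/step_allocate_ip.go     |  93 ----------
 builder/openstack-new/step_create_image.go    |  84 ---------
 builder/openstack-new/step_key_pair.go        | 106 ------------
 .../openstack-new/step_run_source_server.go   | 110 ------------
 .../step_wait_for_rackconnect.go              |  52 ------
 builder/openstack/access_config.go            | 163 +++++++++---------
 builder/openstack/access_config_test.go       |  77 ---------
 builder/openstack/artifact.go                 |   7 +-
 builder/openstack/builder.go                  |  23 +--
 builder/openstack/server.go                   |  23 +--
 builder/openstack/ssh.go                      |  62 ++++---
 builder/openstack/step_allocate_ip.go         |  68 +++++---
 builder/openstack/step_create_image.go        |  39 +++--
 builder/openstack/step_key_pair.go            |  43 +++--
 builder/openstack/step_run_source_server.go   |  70 ++++----
 .../openstack/step_wait_for_rackconnect.go    |  22 ++-
 27 files changed, 303 insertions(+), 1556 deletions(-)
 delete mode 100644 builder/openstack-new/access_config.go
 delete mode 100644 builder/openstack-new/artifact.go
 delete mode 100644 builder/openstack-new/artifact_test.go
 delete mode 100644 builder/openstack-new/builder.go
 delete mode 100644 builder/openstack-new/builder_test.go
 delete mode 100644 builder/openstack-new/image_config.go
 delete mode 100644 builder/openstack-new/image_config_test.go
 delete mode 100644 builder/openstack-new/run_config.go
 delete mode 100644 builder/openstack-new/run_config_test.go
 delete mode 100644 builder/openstack-new/server.go
 delete mode 100644 builder/openstack-new/ssh.go
 delete mode 100644 builder/openstack-new/step_allocate_ip.go
 delete mode 100644 builder/openstack-new/step_create_image.go
 delete mode 100644 builder/openstack-new/step_key_pair.go
 delete mode 100644 builder/openstack-new/step_run_source_server.go
 delete mode 100644 builder/openstack-new/step_wait_for_rackconnect.go
 delete mode 100644 builder/openstack/access_config_test.go
diff --git a/builder/openstack-new/access_config.go b/builder/openstack-new/access_config.go
deleted file mode 100644
index e0f962c50..000000000
--- a/builder/openstack-new/access_config.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package openstack
-
-import (
-	"crypto/tls"
-	"fmt"
-	"net/http"
-	"os"
-
-	"github.com/mitchellh/packer/template/interpolate"
-	"github.com/rackspace/gophercloud"
-	"github.com/rackspace/gophercloud/openstack"
-)
-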
-// AccessConfig is for common configuration related to openstack access -type AccessConfig struct { - Username string `mapstructure:"username"` - UserID string `mapstructure:"user_id"` - Password string `mapstructure:"password"` - APIKey string `mapstructure:"api_key"` - IdentityEndpoint string `mapstructure:"identity_endpoint"` - TenantID string `mapstructure:"tenant_id"` - TenantName string `mapstructure:"tenant_name"` - DomainID string `mapstructure:"domain_id"` - DomainName string `mapstructure:"domain_name"` - Insecure bool `mapstructure:"insecure"` - Region string `mapstructure:"region"` - EndpointType string `mapstructure:"endpoint_type"` - - osClient *gophercloud.ProviderClient -} - -func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { - if c.EndpointType != "internal" && c.EndpointType != "internalURL" && - c.EndpointType != "admin" && c.EndpointType != "adminURL" && - c.EndpointType != "public" && c.EndpointType != "publicURL" && - c.EndpointType != "" { - return []error{fmt.Errorf("Invalid endpoint type provided")} - } - - if c.Region == "" { - c.Region = os.Getenv("OS_REGION_NAME") - } - - // Get as much as possible from the end - ao, err := openstack.AuthOptionsFromEnv() - if err != nil { - return []error{err} - } - - // Override values if we have them in our config - overrides := []struct { - From, To *string - }{ - {&c.Username, &ao.Username}, - {&c.UserID, &ao.UserID}, - {&c.Password, &ao.Password}, - {&c.APIKey, &ao.APIKey}, - {&c.IdentityEndpoint, &ao.IdentityEndpoint}, - {&c.TenantID, &ao.TenantID}, - {&c.TenantName, &ao.TenantName}, - {&c.DomainID, &ao.DomainID}, - {&c.DomainName, &ao.DomainName}, - } - for _, s := range overrides { - if *s.From != "" { - *s.To = *s.From - } - } - - // Build the client itself - client, err := openstack.NewClient(ao.IdentityEndpoint) - if err != nil { - return []error{err} - } - - // If we have insecure set, then create a custom HTTP client that - // ignores SSL errors. - if c.Insecure { - config := &tls.Config{InsecureSkipVerify: true} - transport := &http.Transport{TLSClientConfig: config} - client.HTTPClient.Transport = transport - } - - // Auth - err = openstack.Authenticate(client, ao) - if err != nil { - return []error{err} - } - - c.osClient = client - return nil -} - -func (c *AccessConfig) computeV2Client() (*gophercloud.ServiceClient, error) { - return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ - Region: c.Region, - Availability: c.getEndpointType(), - }) -} - -func (c *AccessConfig) getEndpointType() gophercloud.Availability { - if c.EndpointType == "internal" || c.EndpointType == "internalURL" { - return gophercloud.AvailabilityInternal - } - if c.EndpointType == "admin" || c.EndpointType == "adminURL" { - return gophercloud.AvailabilityAdmin - } - return gophercloud.AvailabilityPublic -} diff --git a/builder/openstack-new/artifact.go b/builder/openstack-new/artifact.go deleted file mode 100644 index aa60d2641..000000000 --- a/builder/openstack-new/artifact.go +++ /dev/null @@ -1,47 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/images" -) - -// Artifact is an artifact implementation that contains built images. -type Artifact struct { - // ImageId of built image - ImageId string - - // BuilderId is the unique ID for the builder that created this image - BuilderIdValue string - - // OpenStack connection for performing API stuff. 
- Client *gophercloud.ServiceClient -} - -func (a *Artifact) BuilderId() string { - return a.BuilderIdValue -} - -func (*Artifact) Files() []string { - // We have no files - return nil -} - -func (a *Artifact) Id() string { - return a.ImageId -} - -func (a *Artifact) String() string { - return fmt.Sprintf("An image was created: %v", a.ImageId) -} - -func (a *Artifact) State(name string) interface{} { - return nil -} - -func (a *Artifact) Destroy() error { - log.Printf("Destroying image: %s", a.ImageId) - return images.Delete(a.Client, a.ImageId).ExtractErr() -} diff --git a/builder/openstack-new/artifact_test.go b/builder/openstack-new/artifact_test.go deleted file mode 100644 index 313fea7cf..000000000 --- a/builder/openstack-new/artifact_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package openstack - -import ( - "github.com/mitchellh/packer/packer" - "testing" -) - -func TestArtifact_Impl(t *testing.T) { - var _ packer.Artifact = new(Artifact) -} - -func TestArtifactId(t *testing.T) { - expected := `b8cdf55b-c916-40bd-b190-389ec144c4ed` - - a := &Artifact{ - ImageId: "b8cdf55b-c916-40bd-b190-389ec144c4ed", - } - - result := a.Id() - if result != expected { - t.Fatalf("bad: %s", result) - } -} - -func TestArtifactString(t *testing.T) { - expected := "An image was created: b8cdf55b-c916-40bd-b190-389ec144c4ed" - - a := &Artifact{ - ImageId: "b8cdf55b-c916-40bd-b190-389ec144c4ed", - } - result := a.String() - if result != expected { - t.Fatalf("bad: %s", result) - } -} diff --git a/builder/openstack-new/builder.go b/builder/openstack-new/builder.go deleted file mode 100644 index bebb28452..000000000 --- a/builder/openstack-new/builder.go +++ /dev/null @@ -1,134 +0,0 @@ -// The openstack package contains a packer.Builder implementation that -// builds Images for openstack. - -package openstack - -import ( - "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common" - "log" - - "github.com/mitchellh/packer/helper/config" - "github.com/mitchellh/packer/packer" - "github.com/mitchellh/packer/template/interpolate" -) - -// The unique ID for this builder -const BuilderId = "mitchellh.openstack" - -type Config struct { - common.PackerConfig `mapstructure:",squash"` - AccessConfig `mapstructure:",squash"` - ImageConfig `mapstructure:",squash"` - RunConfig `mapstructure:",squash"` - - ctx interpolate.Context -} - -type Builder struct { - config Config - runner multistep.Runner -} - -func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, - }, raws...) - if err != nil { - return nil, err - } - - // Accumulate any errors - var errs *packer.MultiError - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(&b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) 
- - if errs != nil && len(errs.Errors) > 0 { - return nil, errs - } - - log.Println(common.ScrubConfig(b.config, b.config.Password)) - return nil, nil -} - -func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - computeClient, err := b.config.computeV2Client() - if err != nil { - return nil, fmt.Errorf("Error initializing compute client: %s", err) - } - - // Setup the state bag and initial state for the steps - state := new(multistep.BasicStateBag) - state.Put("config", b.config) - state.Put("hook", hook) - state.Put("ui", ui) - - // Build the steps - steps := []multistep.Step{ - &StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), - }, - &StepRunSourceServer{ - Name: b.config.ImageName, - Flavor: b.config.Flavor, - SourceImage: b.config.SourceImage, - SecurityGroups: b.config.SecurityGroups, - Networks: b.config.Networks, - }, - &StepWaitForRackConnect{ - Wait: b.config.RackconnectWait, - }, - &StepAllocateIp{ - FloatingIpPool: b.config.FloatingIpPool, - FloatingIp: b.config.FloatingIp, - }, - &common.StepConnectSSH{ - SSHAddress: SSHAddress(computeClient, b.config.SSHInterface, b.config.SSHPort), - SSHConfig: SSHConfig(b.config.SSHUsername), - SSHWaitTimeout: b.config.SSHTimeout(), - }, - &common.StepProvision{}, - &stepCreateImage{}, - } - - // Run! - if b.config.PackerDebug { - b.runner = &multistep.DebugRunner{ - Steps: steps, - PauseFn: common.MultistepDebugFn(ui), - } - } else { - b.runner = &multistep.BasicRunner{Steps: steps} - } - - b.runner.Run(state) - - // If there was an error, return that - if rawErr, ok := state.GetOk("error"); ok { - return nil, rawErr.(error) - } - - // If there are no images, then just return - if _, ok := state.GetOk("image"); !ok { - return nil, nil - } - - // Build the artifact and return it - artifact := &Artifact{ - ImageId: state.Get("image").(string), - BuilderIdValue: BuilderId, - Client: computeClient, - } - - return artifact, nil -} - -func (b *Builder) Cancel() { - if b.runner != nil { - log.Println("Cancelling the step runner...") - b.runner.Cancel() - } -} diff --git a/builder/openstack-new/builder_test.go b/builder/openstack-new/builder_test.go deleted file mode 100644 index badf9784d..000000000 --- a/builder/openstack-new/builder_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package openstack - -import ( - "github.com/mitchellh/packer/packer" - "testing" -) - -func testConfig() map[string]interface{} { - return map[string]interface{}{ - "username": "foo", - "password": "bar", - "provider": "foo", - "region": "DFW", - "image_name": "foo", - "source_image": "foo", - "flavor": "foo", - "ssh_username": "root", - } -} - -func TestBuilder_ImplementsBuilder(t *testing.T) { - var raw interface{} - raw = &Builder{} - if _, ok := raw.(packer.Builder); !ok { - t.Fatalf("Builder should be a builder") - } -} - -func TestBuilder_Prepare_BadType(t *testing.T) { - b := &Builder{} - c := map[string]interface{}{ - "password": []string{}, - } - - warns, err := b.Prepare(c) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatalf("prepare should fail") - } -} - -func TestBuilderPrepare_ImageName(t *testing.T) { - var b Builder - config := testConfig() - - // Test good - config["image_name"] = "foo" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - // Test bad - config["image_name"] = "foo {{" - b = Builder{} - warns, err = 
b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test bad - delete(config, "image_name") - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } -} - -func TestBuilderPrepare_InvalidKey(t *testing.T) { - var b Builder - config := testConfig() - - // Add a random key - config["i_should_not_be_valid"] = true - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } -} diff --git a/builder/openstack-new/image_config.go b/builder/openstack-new/image_config.go deleted file mode 100644 index 124449eab..000000000 --- a/builder/openstack-new/image_config.go +++ /dev/null @@ -1,25 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/mitchellh/packer/template/interpolate" -) - -// ImageConfig is for common configuration related to creating Images. -type ImageConfig struct { - ImageName string `mapstructure:"image_name"` -} - -func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error { - errs := make([]error, 0) - if c.ImageName == "" { - errs = append(errs, fmt.Errorf("An image_name must be specified")) - } - - if len(errs) > 0 { - return errs - } - - return nil -} diff --git a/builder/openstack-new/image_config_test.go b/builder/openstack-new/image_config_test.go deleted file mode 100644 index 4d81ecd94..000000000 --- a/builder/openstack-new/image_config_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package openstack - -import ( - "testing" -) - -func testImageConfig() *ImageConfig { - return &ImageConfig{ - ImageName: "foo", - } -} - -func TestImageConfigPrepare_Region(t *testing.T) { - c := testImageConfig() - if err := c.Prepare(nil); err != nil { - t.Fatalf("shouldn't have err: %s", err) - } - - c.ImageName = "" - if err := c.Prepare(nil); err == nil { - t.Fatal("should have error") - } -} diff --git a/builder/openstack-new/run_config.go b/builder/openstack-new/run_config.go deleted file mode 100644 index e5d73c9c1..000000000 --- a/builder/openstack-new/run_config.go +++ /dev/null @@ -1,75 +0,0 @@ -package openstack - -import ( - "errors" - "fmt" - "time" - - "github.com/mitchellh/packer/template/interpolate" -) - -// RunConfig contains configuration for running an instance from a source -// image and details on how to access that launched image. 
-type RunConfig struct { - SourceImage string `mapstructure:"source_image"` - Flavor string `mapstructure:"flavor"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort int `mapstructure:"ssh_port"` - SSHInterface string `mapstructure:"ssh_interface"` - OpenstackProvider string `mapstructure:"openstack_provider"` - UseFloatingIp bool `mapstructure:"use_floating_ip"` - RackconnectWait bool `mapstructure:"rackconnect_wait"` - FloatingIpPool string `mapstructure:"floating_ip_pool"` - FloatingIp string `mapstructure:"floating_ip"` - SecurityGroups []string `mapstructure:"security_groups"` - Networks []string `mapstructure:"networks"` - - // Unexported fields that are calculated from others - sshTimeout time.Duration -} - -func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { - // Defaults - if c.SSHUsername == "" { - c.SSHUsername = "root" - } - - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if c.RawSSHTimeout == "" { - c.RawSSHTimeout = "5m" - } - - if c.UseFloatingIp && c.FloatingIpPool == "" { - c.FloatingIpPool = "public" - } - - // Validation - var err error - errs := make([]error, 0) - if c.SourceImage == "" { - errs = append(errs, errors.New("A source_image must be specified")) - } - - if c.Flavor == "" { - errs = append(errs, errors.New("A flavor must be specified")) - } - - if c.SSHUsername == "" { - errs = append(errs, errors.New("An ssh_username must be specified")) - } - - c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - - return errs -} - -func (c *RunConfig) SSHTimeout() time.Duration { - return c.sshTimeout -} diff --git a/builder/openstack-new/run_config_test.go b/builder/openstack-new/run_config_test.go deleted file mode 100644 index 16b89b352..000000000 --- a/builder/openstack-new/run_config_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package openstack - -import ( - "os" - "testing" -) - -func init() { - // Clear out the openstack env vars so they don't - // affect our tests. 
- os.Setenv("SDK_USERNAME", "") - os.Setenv("SDK_PASSWORD", "") - os.Setenv("SDK_PROVIDER", "") -} - -func testRunConfig() *RunConfig { - return &RunConfig{ - SourceImage: "abcd", - Flavor: "m1.small", - SSHUsername: "root", - } -} - -func TestRunConfigPrepare(t *testing.T) { - c := testRunConfig() - err := c.Prepare(nil) - if len(err) > 0 { - t.Fatalf("err: %s", err) - } -} - -func TestRunConfigPrepare_InstanceType(t *testing.T) { - c := testRunConfig() - c.Flavor = "" - if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) - } -} - -func TestRunConfigPrepare_SourceImage(t *testing.T) { - c := testRunConfig() - c.SourceImage = "" - if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) - } -} - -func TestRunConfigPrepare_SSHPort(t *testing.T) { - c := testRunConfig() - c.SSHPort = 0 - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } - - if c.SSHPort != 22 { - t.Fatalf("invalid value: %d", c.SSHPort) - } - - c.SSHPort = 44 - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } - - if c.SSHPort != 44 { - t.Fatalf("invalid value: %d", c.SSHPort) - } -} - -func TestRunConfigPrepare_SSHTimeout(t *testing.T) { - c := testRunConfig() - c.RawSSHTimeout = "" - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } - - c.RawSSHTimeout = "bad" - if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) - } -} - -func TestRunConfigPrepare_SSHUsername(t *testing.T) { - c := testRunConfig() - c.SSHUsername = "" - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } -} diff --git a/builder/openstack-new/server.go b/builder/openstack-new/server.go deleted file mode 100644 index de8c9d103..000000000 --- a/builder/openstack-new/server.go +++ /dev/null @@ -1,95 +0,0 @@ -package openstack - -import ( - "errors" - "fmt" - "log" - "time" - - "github.com/mitchellh/multistep" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an openstack instance after -// refreshing it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. -type StateRefreshFunc func() (result interface{}, state string, progress int, err error) - -// StateChangeConf is the configuration struct used for `WaitForState`. -type StateChangeConf struct { - Pending []string - Refresh StateRefreshFunc - StepState multistep.StateBag - Target string -} - -// ServerStateRefreshFunc returns a StateRefreshFunc that is used to watch -// an openstack server. 
-func ServerStateRefreshFunc( - client *gophercloud.ServiceClient, s *servers.Server) StateRefreshFunc { - return func() (interface{}, string, int, error) { - serverNew, err := servers.Get(client, s.ID).Extract() - if err != nil { - errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) - if ok && errCode.Actual == 404 { - log.Printf("[INFO] 404 on ServerStateRefresh, returning DELETED") - return nil, "DELETED", 0, nil - } else { - log.Printf("[ERROR] Error on ServerStateRefresh: %s", err) - return nil, "", 0, err - } - } - - return serverNew, serverNew.Status, serverNew.Progress, nil - } -} - -// WaitForState watches an object and waits for it to achieve a certain -// state. -func WaitForState(conf *StateChangeConf) (i interface{}, err error) { - log.Printf("Waiting for state to become: %s", conf.Target) - - for { - var currentProgress int - var currentState string - i, currentState, currentProgress, err = conf.Refresh() - if err != nil { - return - } - - if currentState == conf.Target { - return - } - - if conf.StepState != nil { - if _, ok := conf.StepState.GetOk(multistep.StateCancelled); ok { - return nil, errors.New("interrupted") - } - } - - found := false - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - break - } - } - - if !found { - return nil, fmt.Errorf("unexpected state '%s', wanted target '%s'", currentState, conf.Target) - } - - log.Printf("Waiting for state to become: %s currently %s (%d%%)", conf.Target, currentState, currentProgress) - time.Sleep(2 * time.Second) - } - - return -} diff --git a/builder/openstack-new/ssh.go b/builder/openstack-new/ssh.go deleted file mode 100644 index 7b0510f98..000000000 --- a/builder/openstack-new/ssh.go +++ /dev/null @@ -1,92 +0,0 @@ -package openstack - -import ( - "errors" - "fmt" - "log" - "time" - - "github.com/mitchellh/multistep" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - "golang.org/x/crypto/ssh" -) - -// SSHAddress returns a function that can be given to the SSH communicator -// for determining the SSH address based on the server AccessIPv4 setting.. -func SSHAddress( - client *gophercloud.ServiceClient, - sshinterface string, port int) func(multistep.StateBag) (string, error) { - return func(state multistep.StateBag) (string, error) { - s := state.Get("server").(*servers.Server) - - // If we have a floating IP, use that - ip := state.Get("access_ip").(*floatingip.FloatingIP) - if ip != nil && ip.FixedIP != "" { - return fmt.Sprintf("%s:%d", ip.FixedIP, port), nil - } - - if s.AccessIPv4 != "" { - return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil - } - - // Get all the addresses associated with this server. This - // was taken directly from Terraform. 
- for _, networkAddresses := range s.Addresses { - elements, ok := networkAddresses.([]interface{}) - if !ok { - log.Printf( - "[ERROR] Unknown return type for address field: %#v", - networkAddresses) - continue - } - - for _, element := range elements { - var addr string - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" { - addr = address["addr"].(string) - } else { - if address["version"].(float64) == 4 { - addr = address["addr"].(string) - } - } - if addr != "" { - return fmt.Sprintf("%s:%d", addr, port), nil - } - } - } - - s, err := servers.Get(client, s.ID).Extract() - if err != nil { - return "", err - } - - state.Put("server", s) - time.Sleep(1 * time.Second) - - return "", errors.New("couldn't determine IP address for server") - } -} - -// SSHConfig returns a function that can be used for the SSH communicator -// config for connecting to the instance created over SSH using the generated -// private key. -func SSHConfig(username string) func(multistep.StateBag) (*ssh.ClientConfig, error) { - return func(state multistep.StateBag) (*ssh.ClientConfig, error) { - privateKey := state.Get("privateKey").(string) - - signer, err := ssh.ParsePrivateKey([]byte(privateKey)) - if err != nil { - return nil, fmt.Errorf("Error setting up SSH config: %s", err) - } - - return &ssh.ClientConfig{ - User: username, - Auth: []ssh.AuthMethod{ - ssh.PublicKeys(signer), - }, - }, nil - } -} diff --git a/builder/openstack-new/step_allocate_ip.go b/builder/openstack-new/step_allocate_ip.go deleted file mode 100644 index 16efe8d38..000000000 --- a/builder/openstack-new/step_allocate_ip.go +++ /dev/null @@ -1,93 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -type StepAllocateIp struct { - FloatingIpPool string - FloatingIp string -} - -func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { - ui := state.Get("ui").(packer.Ui) - config := state.Get("config").(Config) - server := state.Get("server").(*servers.Server) - - // We need the v2 compute client - client, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - var instanceIp *floatingip.FloatingIP - // This is here in case we error out before putting instanceIp into the - // statebag below, because it is requested by Cleanup() - state.Put("access_ip", instanceIp) - - if s.FloatingIp != "" { - *instanceIp = floatingip.FloatingIP{FixedIP: s.FloatingIp} - } else if s.FloatingIpPool != "" { - newIp, err := floatingip.Create(client, floatingip.CreateOpts{ - Pool: s.FloatingIpPool, - }).Extract() - if err != nil { - err := fmt.Errorf("Error creating floating ip from pool '%s'", s.FloatingIpPool) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - *instanceIp = *newIp - ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.FixedIP)) - } - - if instanceIp != nil && instanceIp.FixedIP != "" { - err := floatingip.Associate(client, server.ID, instanceIp.FixedIP).ExtractErr() - if err != nil { - err := fmt.Errorf( - "Error associating floating IP %s with instance.", - instanceIp.FixedIP) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - ui.Say(fmt.Sprintf( - "Added 
floating IP %s to instance...", instanceIp.FixedIP)) - } - - state.Put("access_ip", instanceIp) - return multistep.ActionContinue -} - -func (s *StepAllocateIp) Cleanup(state multistep.StateBag) { - config := state.Get("config").(Config) - ui := state.Get("ui").(packer.Ui) - instanceIp := state.Get("access_ip").(*floatingip.FloatingIP) - - // We need the v2 compute client - client, err := config.computeV2Client() - if err != nil { - ui.Error(fmt.Sprintf( - "Error deleting temporary floating IP %s", instanceIp.FixedIP)) - return - } - - if s.FloatingIpPool != "" && instanceIp.ID != "" { - if err := floatingip.Delete(client, instanceIp.ID).ExtractErr(); err != nil { - ui.Error(fmt.Sprintf( - "Error deleting temporary floating IP %s", instanceIp.FixedIP)) - return - } - - ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.FixedIP)) - } -} diff --git a/builder/openstack-new/step_create_image.go b/builder/openstack-new/step_create_image.go deleted file mode 100644 index b777e8b0b..000000000 --- a/builder/openstack-new/step_create_image.go +++ /dev/null @@ -1,84 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/images" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -type stepCreateImage struct{} - -func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(Config) - server := state.Get("server").(*servers.Server) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - client, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - // Create the image - ui.Say(fmt.Sprintf("Creating the image: %s", config.ImageName)) - imageId, err := servers.CreateImage(client, server.ID, servers.CreateImageOpts{ - Name: config.ImageName, - }).ExtractImageID() - if err != nil { - err := fmt.Errorf("Error creating image: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - // Set the Image ID in the state - ui.Message(fmt.Sprintf("Image: %s", imageId)) - state.Put("image", imageId) - - // Wait for the image to become ready - ui.Say("Waiting for image to become ready...") - if err := WaitForImage(client, imageId); err != nil { - err := fmt.Errorf("Error waiting for image: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - return multistep.ActionContinue -} - -func (s *stepCreateImage) Cleanup(multistep.StateBag) { - // No cleanup... -} - -// WaitForImage waits for the given Image ID to become ready. 
-func WaitForImage(client *gophercloud.ServiceClient, imageId string) error { - for { - image, err := images.Get(client, imageId).Extract() - if err != nil { - errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) - if ok && errCode.Actual == 500 { - log.Printf("[ERROR] 500 error received, will ignore and retry: %s", err) - time.Sleep(2 * time.Second) - continue - } - - return err - } - - if image.Status == "ACTIVE" { - return nil - } - - log.Printf("Waiting for image creation status: %s (%d%%)", image.Status, image.Progress) - time.Sleep(2 * time.Second) - } -} diff --git a/builder/openstack-new/step_key_pair.go b/builder/openstack-new/step_key_pair.go deleted file mode 100644 index 06bcbf9ea..000000000 --- a/builder/openstack-new/step_key_pair.go +++ /dev/null @@ -1,106 +0,0 @@ -package openstack - -import ( - "fmt" - "os" - "runtime" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common/uuid" - "github.com/mitchellh/packer/packer" - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" -) - -type StepKeyPair struct { - Debug bool - DebugKeyPath string - keyName string -} - -func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(Config) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - computeClient, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - ui.Say("Creating temporary keypair for this instance...") - keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID()) - keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{ - Name: keyName, - }).Extract() - if err != nil { - state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) - return multistep.ActionHalt - } - - if keypair.PrivateKey == "" { - state.Put("error", fmt.Errorf("The temporary keypair returned was blank")) - return multistep.ActionHalt - } - - // If we're in debug mode, output the private key to the working - // directory. - if s.Debug { - ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) - f, err := os.Create(s.DebugKeyPath) - if err != nil { - state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) - return multistep.ActionHalt - } - defer f.Close() - - // Write the key out - if _, err := f.Write([]byte(keypair.PrivateKey)); err != nil { - state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) - return multistep.ActionHalt - } - - // Chmod it so that it is SSH ready - if runtime.GOOS != "windows" { - if err := f.Chmod(0600); err != nil { - state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err)) - return multistep.ActionHalt - } - } - } - - // Set the keyname so we know to delete it later - s.keyName = keyName - - // Set some state data for use in future steps - state.Put("keyPair", keyName) - state.Put("privateKey", keypair.PrivateKey) - - return multistep.ActionContinue -} - -func (s *StepKeyPair) Cleanup(state multistep.StateBag) { - // If no key name is set, then we never created it, so just return - if s.keyName == "" { - return - } - - config := state.Get("config").(Config) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - computeClient, err := config.computeV2Client() - if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) - return - } - - ui.Say("Deleting temporary keypair...") - err = keypairs.Delete(computeClient, s.keyName).ExtractErr() - if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) - } -} diff --git a/builder/openstack-new/step_run_source_server.go b/builder/openstack-new/step_run_source_server.go deleted file mode 100644 index 4432d5860..000000000 --- a/builder/openstack-new/step_run_source_server.go +++ /dev/null @@ -1,110 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -type StepRunSourceServer struct { - Flavor string - Name string - SourceImage string - SecurityGroups []string - Networks []string - - server *servers.Server -} - -func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(Config) - keyName := state.Get("keyPair").(string) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - computeClient, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - networks := make([]servers.Network, len(s.Networks)) - for i, networkUuid := range s.Networks { - networks[i].UUID = networkUuid - } - - s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{ - CreateOptsBuilder: servers.CreateOpts{ - Name: s.Name, - ImageRef: s.SourceImage, - FlavorName: s.Flavor, - SecurityGroups: s.SecurityGroups, - Networks: networks, - }, - - KeyName: keyName, - }).Extract() - if err != nil { - err := fmt.Errorf("Error launching source server: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - log.Printf("server id: %s", s.server.ID) - - ui.Say(fmt.Sprintf("Waiting for server (%s) to become ready...", s.server.ID)) - stateChange := StateChangeConf{ - Pending: []string{"BUILD"}, - Target: "ACTIVE", - Refresh: ServerStateRefreshFunc(computeClient, s.server), - StepState: state, - } - latestServer, err := WaitForState(&stateChange) - if err != nil { - err := fmt.Errorf("Error waiting for server (%s) to become ready: %s", s.server.ID, err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } - - s.server = latestServer.(*servers.Server) - state.Put("server", s.server) - - return multistep.ActionContinue -} - -func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) { - if s.server == nil { - return - } - - config := state.Get("config").(Config) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - computeClient, err := config.computeV2Client() - if err != nil { - ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) - return - } - - ui.Say("Terminating the source server...") - if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil { - ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) - return - } - - stateChange := StateChangeConf{ - Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED"}, - Refresh: ServerStateRefreshFunc(computeClient, s.server), - Target: "DELETED", - } - - WaitForState(&stateChange) -} diff --git a/builder/openstack-new/step_wait_for_rackconnect.go 
b/builder/openstack-new/step_wait_for_rackconnect.go deleted file mode 100644 index 6263bd17d..000000000 --- a/builder/openstack-new/step_wait_for_rackconnect.go +++ /dev/null @@ -1,52 +0,0 @@ -package openstack - -import ( - "fmt" - "time" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -type StepWaitForRackConnect struct { - Wait bool -} - -func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAction { - if !s.Wait { - return multistep.ActionContinue - } - - config := state.Get("config").(Config) - server := state.Get("server").(*servers.Server) - ui := state.Get("ui").(packer.Ui) - - // We need the v2 compute client - computeClient, err := config.computeV2Client() - if err != nil { - err = fmt.Errorf("Error initializing compute client: %s", err) - state.Put("error", err) - return multistep.ActionHalt - } - - ui.Say(fmt.Sprintf( - "Waiting for server (%s) to become RackConnect ready...", server.ID)) - for { - server, err = servers.Get(computeClient, server.ID).Extract() - if err != nil { - return multistep.ActionHalt - } - - if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" { - break - } - - time.Sleep(2 * time.Second) - } - - return multistep.ActionContinue -} - -func (s *StepWaitForRackConnect) Cleanup(state multistep.StateBag) { -} diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go index cb1c9d7bd..e0f962c50 100644 --- a/builder/openstack/access_config.go +++ b/builder/openstack/access_config.go @@ -4,99 +4,106 @@ import ( "crypto/tls" "fmt" "net/http" - "net/url" "os" - "strings" - "github.com/mitchellh/gophercloud-fork-40444fb" - "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/template/interpolate" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" ) // AccessConfig is for common configuration related to openstack access type AccessConfig struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - ApiKey string `mapstructure:"api_key"` - Project string `mapstructure:"project"` - Provider string `mapstructure:"provider"` - RawRegion string `mapstructure:"region"` - ProxyUrl string `mapstructure:"proxy_url"` - TenantId string `mapstructure:"tenant_id"` - Insecure bool `mapstructure:"insecure"` -} + Username string `mapstructure:"username"` + UserID string `mapstructure:"user_id"` + Password string `mapstructure:"password"` + APIKey string `mapstructure:"api_key"` + IdentityEndpoint string `mapstructure:"identity_endpoint"` + TenantID string `mapstructure:"tenant_id"` + TenantName string `mapstructure:"tenant_name"` + DomainID string `mapstructure:"domain_id"` + DomainName string `mapstructure:"domain_name"` + Insecure bool `mapstructure:"insecure"` + Region string `mapstructure:"region"` + EndpointType string `mapstructure:"endpoint_type"` -// Auth returns a valid Auth object for access to openstack services, or -// an error if the authentication couldn't be resolved. 
-func (c *AccessConfig) Auth() (gophercloud.AccessProvider, error) { - c.Username = common.ChooseString(c.Username, os.Getenv("SDK_USERNAME"), os.Getenv("OS_USERNAME")) - c.Password = common.ChooseString(c.Password, os.Getenv("SDK_PASSWORD"), os.Getenv("OS_PASSWORD")) - c.ApiKey = common.ChooseString(c.ApiKey, os.Getenv("SDK_API_KEY")) - c.Project = common.ChooseString(c.Project, os.Getenv("SDK_PROJECT"), os.Getenv("OS_TENANT_NAME")) - c.Provider = common.ChooseString(c.Provider, os.Getenv("SDK_PROVIDER"), os.Getenv("OS_AUTH_URL")) - c.RawRegion = common.ChooseString(c.RawRegion, os.Getenv("SDK_REGION"), os.Getenv("OS_REGION_NAME")) - c.TenantId = common.ChooseString(c.TenantId, os.Getenv("OS_TENANT_ID")) - - // OpenStack's auto-generated openrc.sh files do not append the suffix - // /tokens to the authentication URL. This ensures it is present when - // specifying the URL. - if strings.Contains(c.Provider, "://") && !strings.HasSuffix(c.Provider, "/tokens") { - c.Provider += "/tokens" - } - - authoptions := gophercloud.AuthOptions{ - AllowReauth: true, - - ApiKey: c.ApiKey, - TenantId: c.TenantId, - TenantName: c.Project, - Username: c.Username, - Password: c.Password, - } - - default_transport := &http.Transport{} - - if c.Insecure { - cfg := new(tls.Config) - cfg.InsecureSkipVerify = true - default_transport.TLSClientConfig = cfg - } - - // For corporate networks it may be the case where we want our API calls - // to be sent through a separate HTTP proxy than external traffic. - if c.ProxyUrl != "" { - url, err := url.Parse(c.ProxyUrl) - if err != nil { - return nil, err - } - - // The gophercloud.Context has a UseCustomClient method which - // would allow us to override with a new instance of http.Client. - default_transport.Proxy = http.ProxyURL(url) - } - - if c.Insecure || c.ProxyUrl != "" { - http.DefaultTransport = default_transport - } - - return gophercloud.Authenticate(c.Provider, authoptions) -} - -func (c *AccessConfig) Region() string { - return common.ChooseString(c.RawRegion, os.Getenv("SDK_REGION"), os.Getenv("OS_REGION_NAME")) + osClient *gophercloud.ProviderClient } func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { - errs := make([]error, 0) - if strings.HasPrefix(c.Provider, "rackspace") { - if c.Region() == "" { - errs = append(errs, fmt.Errorf("region must be specified when using rackspace")) + if c.EndpointType != "internal" && c.EndpointType != "internalURL" && + c.EndpointType != "admin" && c.EndpointType != "adminURL" && + c.EndpointType != "public" && c.EndpointType != "publicURL" && + c.EndpointType != "" { + return []error{fmt.Errorf("Invalid endpoint type provided")} + } + + if c.Region == "" { + c.Region = os.Getenv("OS_REGION_NAME") + } + + // Get as much as possible from the end + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return []error{err} + } + + // Override values if we have them in our config + overrides := []struct { + From, To *string + }{ + {&c.Username, &ao.Username}, + {&c.UserID, &ao.UserID}, + {&c.Password, &ao.Password}, + {&c.APIKey, &ao.APIKey}, + {&c.IdentityEndpoint, &ao.IdentityEndpoint}, + {&c.TenantID, &ao.TenantID}, + {&c.TenantName, &ao.TenantName}, + {&c.DomainID, &ao.DomainID}, + {&c.DomainName, &ao.DomainName}, + } + for _, s := range overrides { + if *s.From != "" { + *s.To = *s.From } } - if len(errs) > 0 { - return errs + // Build the client itself + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return []error{err} } + // If we have insecure set, then create a 
custom HTTP client that + // ignores SSL errors. + if c.Insecure { + config := &tls.Config{InsecureSkipVerify: true} + transport := &http.Transport{TLSClientConfig: config} + client.HTTPClient.Transport = transport + } + + // Auth + err = openstack.Authenticate(client, ao) + if err != nil { + return []error{err} + } + + c.osClient = client return nil } + +func (c *AccessConfig) computeV2Client() (*gophercloud.ServiceClient, error) { + return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ + Region: c.Region, + Availability: c.getEndpointType(), + }) +} + +func (c *AccessConfig) getEndpointType() gophercloud.Availability { + if c.EndpointType == "internal" || c.EndpointType == "internalURL" { + return gophercloud.AvailabilityInternal + } + if c.EndpointType == "admin" || c.EndpointType == "adminURL" { + return gophercloud.AvailabilityAdmin + } + return gophercloud.AvailabilityPublic +} diff --git a/builder/openstack/access_config_test.go b/builder/openstack/access_config_test.go deleted file mode 100644 index cf37448cc..000000000 --- a/builder/openstack/access_config_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package openstack - -import ( - "os" - "testing" -) - -func init() { - // Clear out the openstack env vars so they don't - // affect our tests. - os.Setenv("SDK_REGION", "") - os.Setenv("OS_REGION_NAME", "") -} - -func testAccessConfig() *AccessConfig { - return &AccessConfig{} -} - -func TestAccessConfigPrepare_NoRegion_Rackspace(t *testing.T) { - c := testAccessConfig() - c.Provider = "rackspace-us" - if err := c.Prepare(nil); err == nil { - t.Fatalf("shouldn't have err: %s", err) - } -} - -func TestAccessConfigRegionWithEmptyEnv(t *testing.T) { - c := testAccessConfig() - c.Prepare(nil) - if c.Region() != "" { - t.Fatalf("Region should be empty") - } -} - -func TestAccessConfigRegionWithSdkRegionEnv(t *testing.T) { - c := testAccessConfig() - c.Prepare(nil) - - expectedRegion := "sdk_region" - os.Setenv("SDK_REGION", expectedRegion) - os.Setenv("OS_REGION_NAME", "") - if c.Region() != expectedRegion { - t.Fatalf("Region should be: %s", expectedRegion) - } -} - -func TestAccessConfigRegionWithOsRegionNameEnv(t *testing.T) { - c := testAccessConfig() - c.Prepare(nil) - - expectedRegion := "os_region_name" - os.Setenv("SDK_REGION", "") - os.Setenv("OS_REGION_NAME", expectedRegion) - if c.Region() != expectedRegion { - t.Fatalf("Region should be: %s", expectedRegion) - } -} - -func TestAccessConfigPrepare_NoRegion_PrivateCloud(t *testing.T) { - c := testAccessConfig() - c.Provider = "http://some-keystone-server:5000/v2.0" - if err := c.Prepare(nil); err != nil { - t.Fatalf("shouldn't have err: %s", err) - } -} - -func TestAccessConfigPrepare_Region(t *testing.T) { - dfw := "DFW" - c := testAccessConfig() - c.RawRegion = dfw - if err := c.Prepare(nil); err != nil { - t.Fatalf("shouldn't have err: %s", err) - } - if dfw != c.Region() { - t.Fatalf("Regions do not match: %s %s", dfw, c.Region()) - } -} diff --git a/builder/openstack/artifact.go b/builder/openstack/artifact.go index 6e75fad3e..aa60d2641 100644 --- a/builder/openstack/artifact.go +++ b/builder/openstack/artifact.go @@ -4,7 +4,8 @@ import ( "fmt" "log" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" ) // Artifact is an artifact implementation that contains built images. @@ -16,7 +17,7 @@ type Artifact struct { BuilderIdValue string // OpenStack connection for performing API stuff. 
- Conn gophercloud.CloudServersProvider + Client *gophercloud.ServiceClient } func (a *Artifact) BuilderId() string { @@ -42,5 +43,5 @@ func (a *Artifact) State(name string) interface{} { func (a *Artifact) Destroy() error { log.Printf("Destroying image: %s", a.ImageId) - return a.Conn.DeleteImageById(a.ImageId) + return images.Delete(a.Client, a.ImageId).ExtractErr() } diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index e6e5c6675..bebb28452 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -9,7 +9,6 @@ import ( "github.com/mitchellh/packer/common" "log" - "github.com/mitchellh/gophercloud-fork-40444fb" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -55,28 +54,14 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - auth, err := b.config.AccessConfig.Auth() + computeClient, err := b.config.computeV2Client() if err != nil { - return nil, err - } - //fetches the api requisites from gophercloud for the appropriate - //openstack variant - api, err := gophercloud.PopulateApi(b.config.RunConfig.OpenstackProvider) - if err != nil { - return nil, err - } - api.Region = b.config.AccessConfig.Region() - - csp, err := gophercloud.ServersApi(auth, api) - if err != nil { - log.Printf("Region: %s", b.config.AccessConfig.Region()) - return nil, err + return nil, fmt.Errorf("Error initializing compute client: %s", err) } // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) state.Put("config", b.config) - state.Put("csp", csp) state.Put("hook", hook) state.Put("ui", ui) @@ -101,7 +86,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe FloatingIp: b.config.FloatingIp, }, &common.StepConnectSSH{ - SSHAddress: SSHAddress(csp, b.config.SSHInterface, b.config.SSHPort), + SSHAddress: SSHAddress(computeClient, b.config.SSHInterface, b.config.SSHPort), SSHConfig: SSHConfig(b.config.SSHUsername), SSHWaitTimeout: b.config.SSHTimeout(), }, @@ -135,7 +120,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe artifact := &Artifact{ ImageId: state.Get("image").(string), BuilderIdValue: BuilderId, - Conn: csp, + Client: computeClient, } return artifact, nil diff --git a/builder/openstack/server.go b/builder/openstack/server.go index ba22dd3e2..de8c9d103 100644 --- a/builder/openstack/server.go +++ b/builder/openstack/server.go @@ -3,12 +3,12 @@ package openstack import ( "errors" "fmt" - "github.com/mitchellh/multistep" - "github.com/racker/perigee" "log" "time" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" ) // StateRefreshFunc is a function type used for StateChangeConf that is @@ -33,21 +33,22 @@ type StateChangeConf struct { // ServerStateRefreshFunc returns a StateRefreshFunc that is used to watch // an openstack server. 
-func ServerStateRefreshFunc(csp gophercloud.CloudServersProvider, s *gophercloud.Server) StateRefreshFunc { +func ServerStateRefreshFunc( + client *gophercloud.ServiceClient, s *servers.Server) StateRefreshFunc { return func() (interface{}, string, int, error) { - resp, err := csp.ServerById(s.Id) + serverNew, err := servers.Get(client, s.ID).Extract() if err != nil { - urce, ok := err.(*perigee.UnexpectedResponseCodeError) - if ok && (urce.Actual == 404) { - log.Printf("404 on ServerStateRefresh, returning DELETED") - + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if ok && errCode.Actual == 404 { + log.Printf("[INFO] 404 on ServerStateRefresh, returning DELETED") return nil, "DELETED", 0, nil } else { - log.Printf("Error on ServerStateRefresh: %s", err) + log.Printf("[ERROR] Error on ServerStateRefresh: %s", err) return nil, "", 0, err } } - return resp, resp.Status, resp.Progress, nil + + return serverNew, serverNew.Status, serverNew.Progress, nil } } diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index d20f24170..7b0510f98 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -3,49 +3,67 @@ package openstack import ( "errors" "fmt" - "github.com/mitchellh/multistep" - "golang.org/x/crypto/ssh" + "log" "time" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "golang.org/x/crypto/ssh" ) // SSHAddress returns a function that can be given to the SSH communicator // for determining the SSH address based on the server AccessIPv4 setting.. -func SSHAddress(csp gophercloud.CloudServersProvider, sshinterface string, port int) func(multistep.StateBag) (string, error) { +func SSHAddress( + client *gophercloud.ServiceClient, + sshinterface string, port int) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { - s := state.Get("server").(*gophercloud.Server) + s := state.Get("server").(*servers.Server) - if ip := state.Get("access_ip").(gophercloud.FloatingIp); ip.Ip != "" { - return fmt.Sprintf("%s:%d", ip.Ip, port), nil + // If we have a floating IP, use that + ip := state.Get("access_ip").(*floatingip.FloatingIP) + if ip != nil && ip.FixedIP != "" { + return fmt.Sprintf("%s:%d", ip.FixedIP, port), nil } - ip_pools, err := s.AllAddressPools() - if err != nil { - return "", errors.New("Error parsing SSH addresses") + if s.AccessIPv4 != "" { + return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil } - for pool, addresses := range ip_pools { - if sshinterface != "" { - if pool != sshinterface { - continue - } + + // Get all the addresses associated with this server. This + // was taken directly from Terraform. 
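+		//
+		// For reference, an illustrative sketch (not part of the original
+		// patch) of the payload Nova returns in s.Addresses: a map of pool
+		// names to address entries, e.g.
+		//
+		//   {"private": [{"addr": "10.0.0.2", "version": 4,
+		//                 "OS-EXT-IPS:type": "fixed"}]}
+		//
+		// The loop below walks that structure, accepting a "floating"
+		// address of any version or any IPv4 address, and returning the
+		// first non-empty match.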
+ for _, networkAddresses := range s.Addresses { + elements, ok := networkAddresses.([]interface{}) + if !ok { + log.Printf( + "[ERROR] Unknown return type for address field: %#v", + networkAddresses) + continue } - if pool != "" { - for _, address := range addresses { - if address.Addr != "" && address.Version == 4 { - return fmt.Sprintf("%s:%d", address.Addr, port), nil + + for _, element := range elements { + var addr string + address := element.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "floating" { + addr = address["addr"].(string) + } else { + if address["version"].(float64) == 4 { + addr = address["addr"].(string) } } + if addr != "" { + return fmt.Sprintf("%s:%d", addr, port), nil + } } } - serverState, err := csp.ServerById(s.Id) - + s, err := servers.Get(client, s.ID).Extract() if err != nil { return "", err } - state.Put("server", serverState) + state.Put("server", s) time.Sleep(1 * time.Second) return "", errors.New("couldn't determine IP address for server") diff --git a/builder/openstack/step_allocate_ip.go b/builder/openstack/step_allocate_ip.go index b64f8b617..16efe8d38 100644 --- a/builder/openstack/step_allocate_ip.go +++ b/builder/openstack/step_allocate_ip.go @@ -2,10 +2,11 @@ package openstack import ( "fmt" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" ) type StepAllocateIp struct { @@ -15,53 +16,78 @@ type StepAllocateIp struct { func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - csp := state.Get("csp").(gophercloud.CloudServersProvider) - server := state.Get("server").(*gophercloud.Server) + config := state.Get("config").(Config) + server := state.Get("server").(*servers.Server) - var instanceIp gophercloud.FloatingIp + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + var instanceIp *floatingip.FloatingIP // This is here in case we error out before putting instanceIp into the // statebag below, because it is requested by Cleanup() state.Put("access_ip", instanceIp) if s.FloatingIp != "" { - instanceIp.Ip = s.FloatingIp + *instanceIp = floatingip.FloatingIP{FixedIP: s.FloatingIp} } else if s.FloatingIpPool != "" { - newIp, err := csp.CreateFloatingIp(s.FloatingIpPool) + newIp, err := floatingip.Create(client, floatingip.CreateOpts{ + Pool: s.FloatingIpPool, + }).Extract() if err != nil { err := fmt.Errorf("Error creating floating ip from pool '%s'", s.FloatingIpPool) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - instanceIp = newIp - ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.Ip)) + + *instanceIp = *newIp + ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.FixedIP)) } - if instanceIp.Ip != "" { - if err := csp.AssociateFloatingIp(server.Id, instanceIp); err != nil { - err := fmt.Errorf("Error associating floating IP %s with instance.", instanceIp.Ip) + if instanceIp != nil && instanceIp.FixedIP != "" { + err := floatingip.Associate(client, server.ID, instanceIp.FixedIP).ExtractErr() + if err != nil { + err := fmt.Errorf( + "Error associating floating IP %s with instance.", + instanceIp.FixedIP) state.Put("error", err) 
ui.Error(err.Error()) return multistep.ActionHalt - } else { - ui.Say(fmt.Sprintf("Added floating IP %s to instance...", instanceIp.Ip)) } + + ui.Say(fmt.Sprintf( + "Added floating IP %s to instance...", instanceIp.FixedIP)) } state.Put("access_ip", instanceIp) - return multistep.ActionContinue } func (s *StepAllocateIp) Cleanup(state multistep.StateBag) { + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) - csp := state.Get("csp").(gophercloud.CloudServersProvider) - instanceIp := state.Get("access_ip").(gophercloud.FloatingIp) - if s.FloatingIpPool != "" && instanceIp.Id != 0 { - if err := csp.DeleteFloatingIp(instanceIp); err != nil { - ui.Error(fmt.Sprintf("Error deleting temporary floating IP %s", instanceIp.Ip)) + instanceIp := state.Get("access_ip").(*floatingip.FloatingIP) + + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf( + "Error deleting temporary floating IP %s", instanceIp.FixedIP)) + return + } + + if s.FloatingIpPool != "" && instanceIp.ID != "" { + if err := floatingip.Delete(client, instanceIp.ID).ExtractErr(); err != nil { + ui.Error(fmt.Sprintf( + "Error deleting temporary floating IP %s", instanceIp.FixedIP)) return } - ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.Ip)) + + ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.FixedIP)) } } diff --git a/builder/openstack/step_create_image.go b/builder/openstack/step_create_image.go index 52a2ec4d1..b777e8b0b 100644 --- a/builder/openstack/step_create_image.go +++ b/builder/openstack/step_create_image.go @@ -2,28 +2,36 @@ package openstack import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "time" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" ) type stepCreateImage struct{} func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { - csp := state.Get("csp").(gophercloud.CloudServersProvider) config := state.Get("config").(Config) - server := state.Get("server").(*gophercloud.Server) + server := state.Get("server").(*servers.Server) ui := state.Get("ui").(packer.Ui) + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + // Create the image ui.Say(fmt.Sprintf("Creating the image: %s", config.ImageName)) - createOpts := gophercloud.CreateImage{ + imageId, err := servers.CreateImage(client, server.ID, servers.CreateImageOpts{ Name: config.ImageName, - } - imageId, err := csp.CreateImage(server.Id, createOpts) + }).ExtractImageID() if err != nil { err := fmt.Errorf("Error creating image: %s", err) state.Put("error", err) @@ -32,12 +40,12 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { } // Set the Image ID in the state - ui.Say(fmt.Sprintf("Image: %s", imageId)) + ui.Message(fmt.Sprintf("Image: %s", imageId)) state.Put("image", imageId) // Wait for the image to become ready ui.Say("Waiting for image to become ready...") - if err := WaitForImage(csp, imageId); err != nil { + if err := WaitForImage(client, imageId); err != nil { err := fmt.Errorf("Error waiting for image: %s", err) state.Put("error", 
err) ui.Error(err.Error()) @@ -52,10 +60,17 @@ func (s *stepCreateImage) Cleanup(multistep.StateBag) { } // WaitForImage waits for the given Image ID to become ready. -func WaitForImage(csp gophercloud.CloudServersProvider, imageId string) error { +func WaitForImage(client *gophercloud.ServiceClient, imageId string) error { for { - image, err := csp.ImageById(imageId) + image, err := images.Get(client, imageId).Extract() if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if ok && errCode.Actual == 500 { + log.Printf("[ERROR] 500 error received, will ignore and retry: %s", err) + time.Sleep(2 * time.Second) + continue + } + return err } diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go index 9c46b4377..06bcbf9ea 100644 --- a/builder/openstack/step_key_pair.go +++ b/builder/openstack/step_key_pair.go @@ -2,14 +2,13 @@ package openstack import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common/uuid" - "github.com/mitchellh/packer/packer" - "log" "os" "runtime" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" ) type StepKeyPair struct { @@ -19,18 +18,28 @@ type StepKeyPair struct { } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { - csp := state.Get("csp").(gophercloud.CloudServersProvider) + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + ui.Say("Creating temporary keypair for this instance...") keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID()) - log.Printf("temporary keypair name: %s", keyName) - keyResp, err := csp.CreateKeyPair(gophercloud.NewKeyPair{Name: keyName}) + keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{ + Name: keyName, + }).Extract() if err != nil { state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) return multistep.ActionHalt } - if keyResp.PrivateKey == "" { + + if keypair.PrivateKey == "" { state.Put("error", fmt.Errorf("The temporary keypair returned was blank")) return multistep.ActionHalt } @@ -47,7 +56,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { defer f.Close() // Write the key out - if _, err := f.Write([]byte(keyResp.PrivateKey)); err != nil { + if _, err := f.Write([]byte(keypair.PrivateKey)); err != nil { state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) return multistep.ActionHalt } @@ -66,7 +75,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { // Set some state data for use in future steps state.Put("keyPair", keyName) - state.Put("privateKey", keyResp.PrivateKey) + state.Put("privateKey", keypair.PrivateKey) return multistep.ActionContinue } @@ -77,11 +86,19 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { return } - csp := state.Get("csp").(gophercloud.CloudServersProvider) + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) + return + } + ui.Say("Deleting temporary keypair...") - err := csp.DeleteKeyPair(s.keyName) + err = keypairs.Delete(computeClient, s.keyName).ExtractErr() if err != nil { ui.Error(fmt.Sprintf( "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go index 19e7f024d..4432d5860 100644 --- a/builder/openstack/step_run_source_server.go +++ b/builder/openstack/step_run_source_server.go @@ -2,11 +2,12 @@ package openstack import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" ) type StepRunSourceServer struct { @@ -16,37 +17,38 @@ type StepRunSourceServer struct { SecurityGroups []string Networks []string - server *gophercloud.Server + server *servers.Server } func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction { - csp := state.Get("csp").(gophercloud.CloudServersProvider) + config := state.Get("config").(Config) keyName := state.Get("keyPair").(string) ui := state.Get("ui").(packer.Ui) - // XXX - validate image and flavor is available - - securityGroups := make([]map[string]interface{}, len(s.SecurityGroups)) - for i, groupName := range s.SecurityGroups { - securityGroups[i] = make(map[string]interface{}) - securityGroups[i]["name"] = groupName + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt } - networks := make([]gophercloud.NetworkConfig, len(s.Networks)) + networks := make([]servers.Network, len(s.Networks)) for i, networkUuid := range s.Networks { - networks[i].Uuid = networkUuid + networks[i].UUID = networkUuid } - server := gophercloud.NewServer{ - Name: s.Name, - ImageRef: s.SourceImage, - FlavorRef: s.Flavor, - KeyPairName: keyName, - SecurityGroup: securityGroups, - Networks: networks, - } + s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{ + CreateOptsBuilder: servers.CreateOpts{ + Name: s.Name, + ImageRef: s.SourceImage, + FlavorName: s.Flavor, + SecurityGroups: s.SecurityGroups, + Networks: networks, + }, - serverResp, err := csp.CreateServer(server) + KeyName: keyName, + }).Extract() if err != nil { err := fmt.Errorf("Error launching source server: %s", err) state.Put("error", err) @@ -54,25 +56,24 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction return multistep.ActionHalt } - s.server, err = csp.ServerById(serverResp.Id) - log.Printf("server id: %s", s.server.Id) + log.Printf("server id: %s", s.server.ID) - ui.Say(fmt.Sprintf("Waiting for server (%s) to become ready...", s.server.Id)) + ui.Say(fmt.Sprintf("Waiting for server (%s) to become ready...", s.server.ID)) stateChange := StateChangeConf{ Pending: []string{"BUILD"}, Target: "ACTIVE", - Refresh: ServerStateRefreshFunc(csp, s.server), + Refresh: ServerStateRefreshFunc(computeClient, s.server), StepState: state, } latestServer, err := WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for server (%s) to become ready: %s", s.server.Id, err) + err := fmt.Errorf("Error waiting 
for server (%s) to become ready: %s", s.server.ID, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - s.server = latestServer.(*gophercloud.Server) + s.server = latestServer.(*servers.Server) state.Put("server", s.server) return multistep.ActionContinue @@ -83,18 +84,25 @@ func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) { return } - csp := state.Get("csp").(gophercloud.CloudServersProvider) + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) + return + } + ui.Say("Terminating the source server...") - if err := csp.DeleteServerById(s.server.Id); err != nil { + if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil { ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) return } stateChange := StateChangeConf{ Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED"}, - Refresh: ServerStateRefreshFunc(csp, s.server), + Refresh: ServerStateRefreshFunc(computeClient, s.server), Target: "DELETED", } diff --git a/builder/openstack/step_wait_for_rackconnect.go b/builder/openstack/step_wait_for_rackconnect.go index ee6ee6138..6263bd17d 100644 --- a/builder/openstack/step_wait_for_rackconnect.go +++ b/builder/openstack/step_wait_for_rackconnect.go @@ -2,11 +2,11 @@ package openstack import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "time" - "github.com/mitchellh/gophercloud-fork-40444fb" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" ) type StepWaitForRackConnect struct { @@ -18,14 +18,22 @@ func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAct return multistep.ActionContinue } - csp := state.Get("csp").(gophercloud.CloudServersProvider) - server := state.Get("server").(*gophercloud.Server) + config := state.Get("config").(Config) + server := state.Get("server").(*servers.Server) ui := state.Get("ui").(packer.Ui) - ui.Say(fmt.Sprintf("Waiting for server (%s) to become RackConnect ready...", server.Id)) + // We need the v2 compute client + computeClient, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + ui.Say(fmt.Sprintf( + "Waiting for server (%s) to become RackConnect ready...", server.ID)) for { - server, err := csp.ServerById(server.Id) + server, err = servers.Get(computeClient, server.ID).Extract() if err != nil { return multistep.ActionHalt } From c80d1ab46b609637399a2e696f1e184812b92067 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 00:16:56 -0400 Subject: [PATCH 306/956] remove the new plugin --- plugin/builder-openstack-new/main.go | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 plugin/builder-openstack-new/main.go diff --git a/plugin/builder-openstack-new/main.go b/plugin/builder-openstack-new/main.go deleted file mode 100644 index d8075c78d..000000000 --- a/plugin/builder-openstack-new/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "github.com/mitchellh/packer/builder/openstack-new" - "github.com/mitchellh/packer/packer/plugin" -) - -func main() { - server, err := plugin.Server() - if err != nil { - panic(err) - } - 
server.RegisterBuilder(new(openstack.Builder)) - server.Serve() -} From 50e2eb30e6f2c0cb2313b91c1ccb9db62c7eb2e9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 00:28:38 -0400 Subject: [PATCH 307/956] builder/openstack: modifications to work with rackspace --- builder/openstack/access_config.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go index e0f962c50..71679b979 100644 --- a/builder/openstack/access_config.go +++ b/builder/openstack/access_config.go @@ -42,10 +42,7 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { } // Get as much as possible from the end - ao, err := openstack.AuthOptionsFromEnv() - if err != nil { - return []error{err} - } + ao, _ := openstack.AuthOptionsFromEnv() // Override values if we have them in our config overrides := []struct { From e724b5fe80de907da0dc8aa15b10ae0b83e0a812 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 00:33:52 -0400 Subject: [PATCH 308/956] builder/openstack: support legacy env vars --- builder/openstack/access_config.go | 17 ++++++ .../docs/builders/openstack.html.markdown | 57 +++++-------------- 2 files changed, 30 insertions(+), 44 deletions(-) diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go index 71679b979..ca33495a8 100644 --- a/builder/openstack/access_config.go +++ b/builder/openstack/access_config.go @@ -41,6 +41,23 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { c.Region = os.Getenv("OS_REGION_NAME") } + // Legacy RackSpace stuff. We're keeping this around to keep things BC. + if c.APIKey == "" { + c.APIKey = os.Getenv("SDK_API_KEY") + } + if c.Password == "" { + c.Password = os.Getenv("SDK_PASSWORD") + } + if c.Region == "" { + c.Region = os.Getenv("SDK_REGION") + } + if c.TenantName == "" { + c.TenantName = os.Getenv("SDK_PROJECT") + } + if c.Username == "" { + c.Username = os.Getenv("SDK_USERNAME") + } + // Get as much as possible from the end ao, _ := openstack.AuthOptionsFromEnv() diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index d5dbbf249..10f38a445 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -34,23 +34,21 @@ each category, the available configuration keys are alphabetized. * `image_name` (string) - The name of the resulting image. -* `password` (string) - The password used to connect to the OpenStack service. - If not specified, Packer will use the environment variables - `SDK_PASSWORD` or `OS_PASSWORD` (in that order), if set. - * `source_image` (string) - The ID or full URL to the base image to use. This is the image that will be used to launch a new server and provision it. * `username` (string) - The username used to connect to the OpenStack service. + If not specified, Packer will use the environment variable + `OS_USERNAME`, if set. + +* `password` (string) - The password used to connect to the OpenStack service. If not specified, Packer will use the environment variables - `SDK_USERNAME` or `OS_USERNAME` (in that order), if set. + `OS_PASSWORD`, if set. ### Optional: * `api_key` (string) - The API key used to access OpenStack. Some OpenStack installations require this. - If not specified, Packer will use the environment variables - `SDK_API_KEY`, if set. * `floating_ip` (string) - A specific floating IP to assign to this instance. 
`use_floating_ip` must also be set to true for this to have an affect. @@ -65,32 +63,18 @@ each category, the available configuration keys are alphabetized. * `networks` (array of strings) - A list of networks by UUID to attach to this instance. -* `openstack_provider` (string) - A name of a provider that has a slightly - different API model. Currently supported values are "openstack" (default), - and "rackspace". - -* `project` (string) - The project name to boot the instance into. Some - OpenStack installations require this. - If not specified, Packer will use the environment variables - `SDK_PROJECT` or `OS_TENANT_NAME` (in that order), if set. - -* `provider` (string) - The provider used to connect to the OpenStack service. - If not specified, Packer will use the environment variables `SDK_PROVIDER` - or `OS_AUTH_URL` (in that order), if set. - For Rackspace this should be `rackspace-us` or `rackspace-uk`. - -* `proxy_url` (string) +* `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the + instance into. Some OpenStack installations require this. + If not specified, Packer will use the environment variable + `OS_TENANT_NAME`, if set. * `security_groups` (array of strings) - A list of security groups by name to add to this instance. * `region` (string) - The name of the region, such as "DFW", in which to launch the server to create the AMI. - If not specified, Packer will use the environment variables - `SDK_REGION` or `OS_REGION_NAME` (in that order), if set. - For a `provider` of "rackspace", it is required to specify a region, - either using this option or with an environment variable. For other - providers, including a private cloud, specifying a region is optional. + If not specified, Packer will use the environment variable + `OS_REGION_NAME`, if set. * `ssh_port` (integer) - The port that SSH will be available on. Defaults to port 22. @@ -106,9 +90,6 @@ each category, the available configuration keys are alphabetized. useful for Rackspace are "public" or "private", and the default behavior is to connect via whichever is returned first from the OpenStack API. -* `tenant_id` (string) - Tenant ID for accessing OpenStack if your - installation requires this. - * `use_floating_ip` (boolean) - Whether or not to use a floating IP for the instance. Defaults to false. @@ -124,10 +105,8 @@ Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering. ```javascript { "type": "openstack", - "username": "", - "api_key": "", - "openstack_provider": "rackspace", - "provider": "rackspace-us", + "username": "foo", + "password": "foo", "region": "DFW", "ssh_username": "root", "image_name": "Test image", @@ -160,13 +139,3 @@ script is setting environment variables like: * `OS_TENANT_ID` * `OS_USERNAME` * `OS_PASSWORD` - -## Troubleshooting - -*I get the error "Missing or incorrect provider"* - -* Verify your "username", "password" and "provider" settings. - -*I get the error "Missing endpoint, or insufficient privileges to access endpoint"* - -* Verify your "region" setting. 
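
For illustration, a minimal builder stanza under the reworked auth scheme can
omit credentials entirely and rely on `OS_USERNAME`, `OS_PASSWORD`,
`OS_AUTH_URL`, and `OS_TENANT_NAME` (or their legacy `SDK_*` equivalents)
being exported in the environment; all values below are hypothetical:

```javascript
{
  "type": "openstack",
  "region": "DFW",
  "ssh_username": "root",
  "image_name": "Test image",
  "source_image": "23b564c9-c3e6-49f9-bc68-86c7a9ab5018",
  "flavor": "2"
}
```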
From 590177ea4b0b5057c5ec9a4da4b2811651d62c8b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 00:35:54 -0400 Subject: [PATCH 309/956] builder/openstack: fix unit tests --- builder/openstack/builder_test.go | 53 ------------------------------- 1 file changed, 53 deletions(-) diff --git a/builder/openstack/builder_test.go b/builder/openstack/builder_test.go index badf9784d..ce15873eb 100644 --- a/builder/openstack/builder_test.go +++ b/builder/openstack/builder_test.go @@ -9,7 +9,6 @@ func testConfig() map[string]interface{} { return map[string]interface{}{ "username": "foo", "password": "bar", - "provider": "foo", "region": "DFW", "image_name": "foo", "source_image": "foo", @@ -40,55 +39,3 @@ func TestBuilder_Prepare_BadType(t *testing.T) { t.Fatalf("prepare should fail") } } - -func TestBuilderPrepare_ImageName(t *testing.T) { - var b Builder - config := testConfig() - - // Test good - config["image_name"] = "foo" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - // Test bad - config["image_name"] = "foo {{" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test bad - delete(config, "image_name") - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } -} - -func TestBuilderPrepare_InvalidKey(t *testing.T) { - var b Builder - config := testConfig() - - // Add a random key - config["i_should_not_be_valid"] = true - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } -} From 5d32a1f6e059b107a112ec7811dd8a16c2e6101f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 10:02:04 -0400 Subject: [PATCH 310/956] builder/openstack: use IP not FixedIP --- builder/openstack/ssh.go | 4 ++-- builder/openstack/step_allocate_ip.go | 27 ++++++++++++++------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 7b0510f98..76c2686b1 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -23,8 +23,8 @@ func SSHAddress( // If we have a floating IP, use that ip := state.Get("access_ip").(*floatingip.FloatingIP) - if ip != nil && ip.FixedIP != "" { - return fmt.Sprintf("%s:%d", ip.FixedIP, port), nil + if ip != nil && ip.IP != "" { + return fmt.Sprintf("%s:%d", ip.IP, port), nil } if s.AccessIPv4 != "" { diff --git a/builder/openstack/step_allocate_ip.go b/builder/openstack/step_allocate_ip.go index 16efe8d38..0ab9b3529 100644 --- a/builder/openstack/step_allocate_ip.go +++ b/builder/openstack/step_allocate_ip.go @@ -27,13 +27,14 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - var instanceIp *floatingip.FloatingIP + var instanceIp floatingip.FloatingIP + // This is here in case we error out before putting instanceIp into the // statebag below, because it is requested by Cleanup() - state.Put("access_ip", instanceIp) + state.Put("access_ip", &instanceIp) if s.FloatingIp != "" { - *instanceIp = floatingip.FloatingIP{FixedIP: s.FloatingIp} + instanceIp.IP = s.FloatingIp } else if s.FloatingIpPool != "" { newIp, err := floatingip.Create(client, floatingip.CreateOpts{ Pool: s.FloatingIpPool, @@ -45,26 +46,26 
@@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - *instanceIp = *newIp - ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.FixedIP)) + instanceIp = *newIp + ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.IP)) } - if instanceIp != nil && instanceIp.FixedIP != "" { - err := floatingip.Associate(client, server.ID, instanceIp.FixedIP).ExtractErr() + if instanceIp.IP != "" { + err := floatingip.Associate(client, server.ID, instanceIp.IP).ExtractErr() if err != nil { err := fmt.Errorf( "Error associating floating IP %s with instance.", - instanceIp.FixedIP) + instanceIp.IP) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } ui.Say(fmt.Sprintf( - "Added floating IP %s to instance...", instanceIp.FixedIP)) + "Added floating IP %s to instance...", instanceIp.IP)) } - state.Put("access_ip", instanceIp) + state.Put("access_ip", &instanceIp) return multistep.ActionContinue } @@ -77,17 +78,17 @@ func (s *StepAllocateIp) Cleanup(state multistep.StateBag) { client, err := config.computeV2Client() if err != nil { ui.Error(fmt.Sprintf( - "Error deleting temporary floating IP %s", instanceIp.FixedIP)) + "Error deleting temporary floating IP %s", instanceIp.IP)) return } if s.FloatingIpPool != "" && instanceIp.ID != "" { if err := floatingip.Delete(client, instanceIp.ID).ExtractErr(); err != nil { ui.Error(fmt.Sprintf( - "Error deleting temporary floating IP %s", instanceIp.FixedIP)) + "Error deleting temporary floating IP %s", instanceIp.IP)) return } - ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.FixedIP)) + ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.IP)) } } From ad374e82afe184249687a8e0993e7376b9d92a7c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 10:05:03 -0400 Subject: [PATCH 311/956] builder/openstack: shuffle some fields to note unused fields --- builder/openstack/run_config.go | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index e5d73c9c1..de76fad20 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -11,19 +11,21 @@ import ( // RunConfig contains configuration for running an instance from a source // image and details on how to access that launched image. 
type RunConfig struct { - SourceImage string `mapstructure:"source_image"` - Flavor string `mapstructure:"flavor"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort int `mapstructure:"ssh_port"` - SSHInterface string `mapstructure:"ssh_interface"` - OpenstackProvider string `mapstructure:"openstack_provider"` - UseFloatingIp bool `mapstructure:"use_floating_ip"` - RackconnectWait bool `mapstructure:"rackconnect_wait"` - FloatingIpPool string `mapstructure:"floating_ip_pool"` - FloatingIp string `mapstructure:"floating_ip"` - SecurityGroups []string `mapstructure:"security_groups"` - Networks []string `mapstructure:"networks"` + SourceImage string `mapstructure:"source_image"` + Flavor string `mapstructure:"flavor"` + RawSSHTimeout string `mapstructure:"ssh_timeout"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPort int `mapstructure:"ssh_port"` + SSHInterface string `mapstructure:"ssh_interface"` + RackconnectWait bool `mapstructure:"rackconnect_wait"` + FloatingIpPool string `mapstructure:"floating_ip_pool"` + FloatingIp string `mapstructure:"floating_ip"` + SecurityGroups []string `mapstructure:"security_groups"` + Networks []string `mapstructure:"networks"` + + // Not really used, but here for BC + OpenstackProvider string `mapstructure:"openstack_provider"` + UseFloatingIp bool `mapstructure:"use_floating_ip"` // Unexported fields that are calculated from others sshTimeout time.Duration From 92b6b5c387b4f090d55450b44ba1619d5a3eddef Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 10:32:31 -0400 Subject: [PATCH 312/956] builder/openstack: can ref flavor by name --- builder/openstack/builder.go | 4 +- builder/openstack/step_load_flavor.go | 61 +++++++++++++++++++ builder/openstack/step_run_source_server.go | 8 ++- .../docs/builders/openstack.html.markdown | 2 +- 4 files changed, 70 insertions(+), 5 deletions(-) create mode 100644 builder/openstack/step_load_flavor.go diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index bebb28452..d30ede8d3 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -67,13 +67,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ + &StepLoadFlavor{ + Flavor: b.config.Flavor, + }, &StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), }, &StepRunSourceServer{ Name: b.config.ImageName, - Flavor: b.config.Flavor, SourceImage: b.config.SourceImage, SecurityGroups: b.config.SecurityGroups, Networks: b.config.Networks, diff --git a/builder/openstack/step_load_flavor.go b/builder/openstack/step_load_flavor.go new file mode 100644 index 000000000..8b8cae994 --- /dev/null +++ b/builder/openstack/step_load_flavor.go @@ -0,0 +1,61 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" +) + +// StepLoadFlavor gets the FlavorRef from a Flavor. It first assumes +// that the Flavor is a ref and verifies it. Otherwise, it tries to find +// the flavor by name. 
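+//
+// Illustrative note (not from the original change): a flavor ID is
+// typically an opaque identifier such as "2" or a UUID, while a name is
+// human readable, e.g. "m1.small". The ID lookup via flavors.Get is
+// attempted first; if it fails, the name lookup via flavors.IDFromName
+// is tried, and both errors are reported when neither matches.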
+type StepLoadFlavor struct { + Flavor string +} + +func (s *StepLoadFlavor) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(Config) + ui := state.Get("ui").(packer.Ui) + + // We need the v2 compute client + client, err := config.computeV2Client() + if err != nil { + err = fmt.Errorf("Error initializing compute client: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say(fmt.Sprintf("Loading flavor: %s", s.Flavor)) + log.Printf("[INFO] Loading flavor by ID: %s", s.Flavor) + flavor, err := flavors.Get(client, s.Flavor).Extract() + if err != nil { + log.Printf("[ERROR] Failed to find flavor by ID: %s", err) + geterr := err + + log.Printf("[INFO] Loading flavor by name: %s", s.Flavor) + id, err := flavors.IDFromName(client, s.Flavor) + if err != nil { + log.Printf("[ERROR] Failed to find flavor by name: %s", err) + err = fmt.Errorf( + "Unable to find specified flavor by ID or name!\n\n"+ + "Error from ID lookup: %s\n\n"+ + "Error from name lookup: %s", + geterr, + err) + state.Put("error", err) + return multistep.ActionHalt + } + + flavor = &flavors.Flavor{ID: id} + } + + ui.Message(fmt.Sprintf("Verified flavor. ID: %s", flavor.ID)) + state.Put("flavor_id", flavor.ID) + return multistep.ActionContinue +} + +func (s *StepLoadFlavor) Cleanup(state multistep.StateBag) { +} diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go index 4432d5860..89d816297 100644 --- a/builder/openstack/step_run_source_server.go +++ b/builder/openstack/step_run_source_server.go @@ -11,7 +11,6 @@ import ( ) type StepRunSourceServer struct { - Flavor string Name string SourceImage string SecurityGroups []string @@ -22,6 +21,7 @@ type StepRunSourceServer struct { func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction { config := state.Get("config").(Config) + flavor := state.Get("flavor_id").(string) keyName := state.Get("keyPair").(string) ui := state.Get("ui").(packer.Ui) @@ -38,11 +38,12 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction networks[i].UUID = networkUuid } + ui.Say("Launching server...") s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{ CreateOptsBuilder: servers.CreateOpts{ Name: s.Name, ImageRef: s.SourceImage, - FlavorName: s.Flavor, + FlavorRef: flavor, SecurityGroups: s.SecurityGroups, Networks: networks, }, @@ -56,9 +57,10 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction return multistep.ActionHalt } + ui.Message(fmt.Sprintf("Server ID: %s", s.server.ID)) log.Printf("server id: %s", s.server.ID) - ui.Say(fmt.Sprintf("Waiting for server (%s) to become ready...", s.server.ID)) + ui.Say("Waiting for server to become ready...") stateChange := StateChangeConf{ Pending: []string{"BUILD"}, Target: "ACTIVE", diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index 10f38a445..3b5bf791d 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -29,7 +29,7 @@ each category, the available configuration keys are alphabetized. ### Required: -* `flavor` (string) - The ID or full URL for the desired flavor for the +* `flavor` (string) - The ID, name, or full URL for the desired flavor for the server to be created. * `image_name` (string) - The name of the resulting image. 
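
With this change, a template can reference the flavor by name rather than by
ID. A sketch (the flavor name "m1.small" is hypothetical and
deployment-specific):

```javascript
{
  "type": "openstack",
  "image_name": "Test image",
  "source_image": "23b564c9-c3e6-49f9-bc68-86c7a9ab5018",
  "flavor": "m1.small",
  "ssh_username": "root"
}
```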
From 53b117e223ab27f3dec38ca34b22caea4587273e Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 10:54:12 -0400
Subject: [PATCH 313/956] update CHANGELOG

---
 CHANGELOG.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1bc247f5..15ce0290a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,10 +2,12 @@ BACKWARDS INCOMPATIBILITIES:
 
-  * The DigitalOcean builder no longer supports the v1 API which has been
+  * builder/digitalocean: no longer supports the v1 API which has been
     deprecated for some time. Most configurations should continue to work
     as long as you use the `api_token` field for auth.
   * builder/digitalocean: `image`, `region`, and `size` are now required.
+  * builder/openstack: auth parameters have been changed to better
+    reflect OS terminology. Existing environment variables still work.
 
 FEATURES:

From 44d86c2e90faa8876b3ac99e7beaf67021c0001a Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 10:54:36 -0400
Subject: [PATCH 314/956] update CHANGELOG

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15ce0290a..d077ceba6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,8 +22,9 @@ IMPROVEMENTS:
   * builder/parallels: Support Parallels Desktop 11 [GH-2199]
   * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait
     for RackConnect data to appear
-  * buidler/openstakc: Add `ssh_interface` option for rackconnect for users that
+  * builder/openstack: Add `ssh_interface` option for rackconnect for users that
     have prohibitive firewalls
+  * builder/openstack: Flavor names can be used as well as refs

From 693f04afccd604e41b07b261ecea873990bc046c Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 11:10:10 -0400
Subject: [PATCH 315/956] builder/openstack: AZ support

---
 builder/openstack/builder.go                |  9 ++++----
 builder/openstack/run_config.go             | 23 ++++++++++---------
 builder/openstack/step_run_source_server.go | 20 ++++++++--------
 .../docs/builders/openstack.html.markdown   |  4 ++++
 4 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go
index d30ede8d3..ab60afc0e 100644
--- a/builder/openstack/builder.go
+++ b/builder/openstack/builder.go
@@ -75,10 +75,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 			DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName),
 		},
 		&StepRunSourceServer{
-			Name:           b.config.ImageName,
-			SourceImage:    b.config.SourceImage,
-			SecurityGroups: b.config.SecurityGroups,
-			Networks:       b.config.Networks,
+			Name:             b.config.ImageName,
+			SourceImage:      b.config.SourceImage,
+			SecurityGroups:   b.config.SecurityGroups,
+			Networks:         b.config.Networks,
+			AvailabilityZone: b.config.AvailabilityZone,
 		},
 		&StepWaitForRackConnect{
 			Wait: b.config.RackconnectWait,
diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go
index de76fad20..4a6a1b81f 100644
--- a/builder/openstack/run_config.go
+++ b/builder/openstack/run_config.go
@@ -11,17 +11,18 @@ import (
 
 // RunConfig contains configuration for running an instance from a source
 // image and details on how to access that launched image.
type RunConfig struct { - SourceImage string `mapstructure:"source_image"` - Flavor string `mapstructure:"flavor"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort int `mapstructure:"ssh_port"` - SSHInterface string `mapstructure:"ssh_interface"` - RackconnectWait bool `mapstructure:"rackconnect_wait"` - FloatingIpPool string `mapstructure:"floating_ip_pool"` - FloatingIp string `mapstructure:"floating_ip"` - SecurityGroups []string `mapstructure:"security_groups"` - Networks []string `mapstructure:"networks"` + SourceImage string `mapstructure:"source_image"` + Flavor string `mapstructure:"flavor"` + RawSSHTimeout string `mapstructure:"ssh_timeout"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPort int `mapstructure:"ssh_port"` + SSHInterface string `mapstructure:"ssh_interface"` + AvailabilityZone string `mapstructure:"availability_zone"` + RackconnectWait bool `mapstructure:"rackconnect_wait"` + FloatingIpPool string `mapstructure:"floating_ip_pool"` + FloatingIp string `mapstructure:"floating_ip"` + SecurityGroups []string `mapstructure:"security_groups"` + Networks []string `mapstructure:"networks"` // Not really used, but here for BC OpenstackProvider string `mapstructure:"openstack_provider"` diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go index 89d816297..4014f1d95 100644 --- a/builder/openstack/step_run_source_server.go +++ b/builder/openstack/step_run_source_server.go @@ -11,10 +11,11 @@ import ( ) type StepRunSourceServer struct { - Name string - SourceImage string - SecurityGroups []string - Networks []string + Name string + SourceImage string + SecurityGroups []string + Networks []string + AvailabilityZone string server *servers.Server } @@ -41,11 +42,12 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction ui.Say("Launching server...") s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{ CreateOptsBuilder: servers.CreateOpts{ - Name: s.Name, - ImageRef: s.SourceImage, - FlavorRef: flavor, - SecurityGroups: s.SecurityGroups, - Networks: networks, + Name: s.Name, + ImageRef: s.SourceImage, + FlavorRef: flavor, + SecurityGroups: s.SecurityGroups, + Networks: networks, + AvailabilityZone: s.AvailabilityZone, }, KeyName: keyName, diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index 3b5bf791d..a7fef29dc 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -50,6 +50,10 @@ each category, the available configuration keys are alphabetized. * `api_key` (string) - The API key used to access OpenStack. Some OpenStack installations require this. +* `availability_zone` (string) - The availability zone to launch the + server in. If this isn't specified, the default enforced by your OpenStack + cluster will be used. This may be required for some OpenStack clusters. + * `floating_ip` (string) - A specific floating IP to assign to this instance. `use_floating_ip` must also be set to true for this to have an affect. 
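
For illustration, the new key slots in alongside the existing builder options;
the zone name here is hypothetical, since valid zones are specific to each
OpenStack cluster:

```javascript
{
  "type": "openstack",
  "availability_zone": "az1",
  "image_name": "Test image",
  "source_image": "23b564c9-c3e6-49f9-bc68-86c7a9ab5018",
  "flavor": "m1.small",
  "ssh_username": "root"
}
```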
From 9bb722c99f3f2e3da83bfaf72d4411e754202644 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 11:10:34 -0400
Subject: [PATCH 316/956] update CHANGELOG

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d077ceba6..b96154c2b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ IMPROVEMENTS:
   * builder/openstack: Add `ssh_interface` option for rackconnect for users that
     have prohibitive firewalls
   * builder/openstack: Flavor names can be used as well as refs
+  * builder/openstack: Add `availability_zone` [GH-2016]

From 35b8df18162f8849eb28f1bed53f67c1421aacd4 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 12 Jun 2015 11:14:28 -0400
Subject: [PATCH 317/956] website: note cloud-init req for openstack [GH-1750]

---
 website/source/docs/builders/openstack.html.markdown | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown
index a7fef29dc..fcd210dec 100644
--- a/website/source/docs/builders/openstack.html.markdown
+++ b/website/source/docs/builders/openstack.html.markdown
@@ -36,6 +36,8 @@ each category, the available configuration keys are alphabetized.
 
 * `source_image` (string) - The ID or full URL to the base image to use.
   This is the image that will be used to launch a new server and provision it.
+  Unless you specify completely custom SSH settings, the source image must
+  have `cloud-init` installed so that the keypair gets assigned properly.
 
 * `username` (string) - The username used to connect to the OpenStack service.
  If not specified, Packer will use the environment variable

From 86206e316db6b1a23dc67c43324af75e7b2860a5 Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Fri, 12 Jun 2015 10:39:37 -0500
Subject: [PATCH 318/956] add tags test

---
 builder/amazon/ebs/tags_acc_test.go | 114 ++++++++++++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 builder/amazon/ebs/tags_acc_test.go

diff --git a/builder/amazon/ebs/tags_acc_test.go b/builder/amazon/ebs/tags_acc_test.go
new file mode 100644
index 000000000..606bb89ee
--- /dev/null
+++ b/builder/amazon/ebs/tags_acc_test.go
@@ -0,0 +1,114 @@
+package ebs
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/mitchellh/packer/builder/amazon/common"
+	builderT "github.com/mitchellh/packer/helper/builder/testing"
+	"github.com/mitchellh/packer/packer"
+)
+
+func TestBuilderTagsAcc_basic(t *testing.T) {
+	builderT.Test(t, builderT.TestCase{
+		PreCheck: func() { testAccPreCheck(t) },
+		Builder:  &Builder{},
+		Template: testBuilderTagsAccBasic,
+		Check:    checkTags(),
+	})
+}
+
+func checkTags() builderT.TestCheckFunc {
+	return func(artifacts []packer.Artifact) error {
+		if len(artifacts) > 1 {
+			return fmt.Errorf("more than 1 artifact")
+		}
+
+		tags := make(map[string]string)
+		tags["OS_Version"] = "Ubuntu"
+		tags["Release"] = "Latest"
+
+		// Get the actual *Artifact pointer so we can access the AMIs directly
+		artifactRaw := artifacts[0]
+		artifact, ok := artifactRaw.(*common.Artifact)
+		if !ok {
+			return fmt.Errorf("unknown artifact: %#v", artifactRaw)
+		}
+
+		// describe the image, get block devices with a snapshot
+		ec2conn, _ := testEC2Conn()
+		imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
+			ImageIDs: []*string{aws.String(artifact.Amis["us-east-1"])},
+		})
+
+		if err != nil {
+			return fmt.Errorf("Error retrieving details for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
+		}
+
+		if len(imageResp.Images) == 0 {
+			return fmt.Errorf("No images found for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
+		}
+
+		image := imageResp.Images[0]
+
+		// Check only those with a Snapshot ID, i.e.
not Ephemeral
+		var snapshots []*string
+		for _, device := range image.BlockDeviceMappings {
+			if device.EBS != nil && device.EBS.SnapshotID != nil {
+				snapshots = append(snapshots, device.EBS.SnapshotID)
+			}
+		}
+
+		// grab matching snapshot info
+		resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
+			SnapshotIDs: snapshots,
+		})
+
+		if err != nil {
+			return fmt.Errorf("Error retrieving Snapshots for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
+		}
+
+		if len(resp.Snapshots) == 0 {
+			return fmt.Errorf("No Snapshots found for AMI Artifact (%#v) in Tags Test", artifact)
+		}
+
+		// grab the snapshots, check the tags
+		for _, s := range resp.Snapshots {
+			expected := len(tags)
+			for _, t := range s.Tags {
+				for key, value := range tags {
+					if key == *t.Key && value == *t.Value {
+						expected--
+					}
+				}
+			}
+
+			if expected > 0 {
+				return fmt.Errorf("Not all tags found")
+			}
+		}
+
+		return nil
+	}
+}
+
+const testBuilderTagsAccBasic = `
+{
+  "builders": [
+    {
+      "type": "test",
+      "region": "us-east-1",
+      "source_ami": "ami-9eaa1cf6",
+      "instance_type": "t2.micro",
+      "ssh_username": "ubuntu",
+      "ami_name": "packer-tags-testing-{{timestamp}}",
+      "tags": {
+        "OS_Version": "Ubuntu",
+        "Release": "Latest"
+      }
+    }
+  ]
+}
+`

From 85db8abe8daab3413868aa48591356ea015342c1 Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Fri, 12 Jun 2015 10:40:55 -0500
Subject: [PATCH 319/956] remove old bats test

---
 test/builder_amazon_ebs.bats | 54 ------------------------------------
 1 file changed, 54 deletions(-)
 delete mode 100755 test/builder_amazon_ebs.bats

diff --git a/test/builder_amazon_ebs.bats b/test/builder_amazon_ebs.bats
deleted file mode 100755
index 89f32a4a0..000000000
--- a/test/builder_amazon_ebs.bats
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bats
-#
-# This tests the amazon-ebs builder. The teardown function will automatically
-# delete any AMIs with a tag of `packer-test` being equal to "true" so
-# be sure any test cases set this.
- -load test_helper -fixtures amazon-ebs - -# This counts how many AMIs were copied to another region -aws_ami_region_copy_count() { - aws ec2 describe-images --region $1 --owners self --output text \ - --filters 'Name=tag:packer-id,Values=ami_region_copy' \ - --query "Images[*].ImageId" \ - | wc -l -} - -# This verifies AMI tags are correctly applied to relevant snapshots -aws_ami_snapshot_tags_count() { - filter='Name=tag:packer-id,Values=ami_snapshot_tags' - aws ec2 describe-images --region $1 --owners self --output text \ - --filters "$filter" \ - --query "Images[*].BlockDeviceMappings[*].Ebs.SnapshotId" \ - | aws ec2 describe-snapshots --region $1 --owners self --output text \ - --filters "$filter" \ - --snapshot-ids \ - | wc -l -} - -teardown() { - aws_ami_cleanup 'us-east-1' - aws_ami_cleanup 'us-west-1' - aws_ami_cleanup 'us-west-2' -} - -@test "amazon-ebs: build minimal.json" { - run packer build $FIXTURE_ROOT/minimal.json - [ "$status" -eq 0 ] -} - -# @unit-testable -@test "amazon-ebs: AMI region copy" { - run packer build $FIXTURE_ROOT/ami_region_copy.json - [ "$status" -eq 0 ] - [ "$(aws_ami_region_copy_count 'us-east-1')" -eq "1" ] - [ "$(aws_ami_region_copy_count 'us-west-1')" -eq "1" ] - [ "$(aws_ami_region_copy_count 'us-west-2')" -eq "1" ] -} - -@test "amazon-ebs: AMI snapshot tags" { - run packer build $FIXTURE_ROOT/ami_snapshot_tags.json - [ "$status" -eq 0 ] - [ "$(aws_ami_snapshot_tags)" -eq "2" ] -} From c875d40b2cbf6407379cf2b4f0ecba24c86e4f2f Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Fri, 12 Jun 2015 10:41:44 -0500 Subject: [PATCH 320/956] remove bats test fixture --- .../amazon-ebs/ami_snapshot_tags.json | 20 ------------------- 1 file changed, 20 deletions(-) delete mode 100644 test/fixtures/amazon-ebs/ami_snapshot_tags.json diff --git a/test/fixtures/amazon-ebs/ami_snapshot_tags.json b/test/fixtures/amazon-ebs/ami_snapshot_tags.json deleted file mode 100644 index 278474a32..000000000 --- a/test/fixtures/amazon-ebs/ami_snapshot_tags.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "builders": [{ - "type": "amazon-ebs", - "ami_name": "packer-test {{timestamp}}", - "instance_type": "m1.small", - "region": "us-east-1", - "ssh_username": "ubuntu", - "source_ami": "ami-0568456c", - "tags": { - "packer-test": "true", - "packer-id": "ami_snapshot_tags" - }, - "ami_block_device_mappings": [ - { - "device_name": "/dev/sde", - "volume_type": "standard" - } - ] - }] -} From 31abc93f50d2389b1237a216af3616db1dc31d7c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 14:22:32 -0400 Subject: [PATCH 321/956] builder/openstack: support ssh_interface [GH-2087] --- builder/openstack/ssh.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 76c2686b1..519ccd406 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -33,7 +33,15 @@ func SSHAddress( // Get all the addresses associated with this server. This // was taken directly from Terraform. 
- for _, networkAddresses := range s.Addresses { + for pool, networkAddresses := range s.Addresses { + // If we have an SSH interface specified, skip it if no match + if sshinterface != "" && pool != sshinterface { + log.Printf( + "[INFO] Skipping pool %s, doesn't match requested %s", + pool, sshinterface) + continue + } + elements, ok := networkAddresses.([]interface{}) if !ok { log.Printf( From bec59b535df5dab334ecbe978bfa846036b00e86 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Fri, 12 Jun 2015 13:05:15 -0500 Subject: [PATCH 322/956] builder/amazon: Add force_deregister option, to automatically deregister artifacts with name conflicts --- builder/amazon/chroot/builder.go | 8 +++ builder/amazon/common/ami_config.go | 1 + builder/amazon/common/step_deregister_ami.go | 56 ++++++++++++++++++++ builder/amazon/common/step_pre_validate.go | 10 +++- builder/amazon/ebs/builder.go | 7 ++- builder/amazon/ebs/builder_acc_test.go | 34 ++++++++++++ builder/amazon/instance/builder.go | 8 +++ helper/builder/testing/testing.go | 18 ++++--- 8 files changed, 133 insertions(+), 9 deletions(-) create mode 100644 builder/amazon/common/step_deregister_ami.go diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 9e7452182..18b07b81c 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -147,6 +147,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ + &awscommon.StepPreValidate{ + DestAmiName: b.config.AMIName, + ForceDeregister: b.config.AMIForceDeregister, + }, &StepInstanceInfo{}, &awscommon.StepSourceAMIInfo{ SourceAmi: b.config.SourceAmi, @@ -164,6 +168,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepChrootProvision{}, &StepEarlyCleanup{}, &StepSnapshot{}, + &awscommon.StepDeregisterAMI{ + ForceDeregister: b.config.AMIForceDeregister, + AMIName: b.config.AMIName, + }, &StepRegisterAMI{}, &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, diff --git a/builder/amazon/common/ami_config.go b/builder/amazon/common/ami_config.go index 14b880f4c..377201902 100644 --- a/builder/amazon/common/ami_config.go +++ b/builder/amazon/common/ami_config.go @@ -17,6 +17,7 @@ type AMIConfig struct { AMIRegions []string `mapstructure:"ami_regions"` AMITags map[string]string `mapstructure:"tags"` AMIEnhancedNetworking bool `mapstructure:"enhanced_networking"` + AMIForceDeregister bool `mapstructure:"force_deregister"` } func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error { diff --git a/builder/amazon/common/step_deregister_ami.go b/builder/amazon/common/step_deregister_ami.go new file mode 100644 index 000000000..ce20a5d90 --- /dev/null +++ b/builder/amazon/common/step_deregister_ami.go @@ -0,0 +1,56 @@ +package common + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" +) + +type StepDeregisterAMI struct { + ForceDeregister bool + AMIName string +} + +func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction { + ec2conn := state.Get("ec2").(*ec2.EC2) + ui := state.Get("ui").(packer.Ui) + + // check for force deregister + if s.ForceDeregister { + resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + Filters: []*ec2.Filter{&ec2.Filter{ + Name: aws.String("name"), + Values: []*string{aws.String(s.AMIName)}, + }}}) + + if err != nil { + err := fmt.Errorf("Error 
creating AMI: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // deregister image(s) by that name + for _, i := range resp.Images { + _, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{ + ImageID: i.ImageID, + }) + + if err != nil { + err := fmt.Errorf("Error deregistering existing AMI: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + ui.Say(fmt.Sprintf("Deregistered AMI %s, id: %s", s.AMIName, *i.ImageID)) + } + } + + return multistep.ActionContinue +} + +func (s *StepDeregisterAMI) Cleanup(state multistep.StateBag) { +} diff --git a/builder/amazon/common/step_pre_validate.go b/builder/amazon/common/step_pre_validate.go index 5eb263eca..bbeacea43 100644 --- a/builder/amazon/common/step_pre_validate.go +++ b/builder/amazon/common/step_pre_validate.go @@ -13,12 +13,18 @@ import ( // the build before actually doing any time consuming work // type StepPreValidate struct { - DestAmiName string + DestAmiName string + ForceDeregister bool } func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction { - ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) + if s.ForceDeregister { + ui.Say("Force Deregister flag found, skipping prevalidating AMI Name") + return multistep.ActionContinue + } + + ec2conn := state.Get("ec2").(*ec2.EC2) ui.Say("Prevalidating AMI Name...") resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index c689accee..356c91e2b 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -79,7 +79,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ &awscommon.StepPreValidate{ - DestAmiName: b.config.AMIName, + DestAmiName: b.config.AMIName, + ForceDeregister: b.config.AMIForceDeregister, }, &awscommon.StepSourceAMIInfo{ SourceAmi: b.config.SourceAmi, @@ -122,6 +123,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &stepStopInstance{SpotPrice: b.config.SpotPrice}, // TODO(mitchellh): verify works with spots &stepModifyInstance{}, + &awscommon.StepDeregisterAMI{ + ForceDeregister: b.config.AMIForceDeregister, + AMIName: b.config.AMIName, + }, &stepCreateAMI{}, &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index b70f7f7b9..1b4de70ce 100644 --- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -28,6 +28,22 @@ func TestBuilderAcc_regionCopy(t *testing.T) { }) } +func TestBuilderAcc_forceDeregister(t *testing.T) { + // Build the same AMI name twice, with force_deregister on the second run + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: buildForceDeregisterConfig("false", "dereg"), + SkipArtifactTeardown: true, + }) + + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: buildForceDeregisterConfig("true", "dereg"), + }) +} + func checkRegionCopy(regions []string) builderT.TestCheckFunc { return func(artifacts []packer.Artifact) error { if len(artifacts) > 1 { @@ -107,3 +123,21 @@ const testBuilderAccRegionCopy = ` }] } ` + +const testBuilderAccForceDeregister = ` +{ + "builders": [{ + "type": "test", + "region": "us-east-1", + "instance_type": 
"m3.medium", + "source_ami": "ami-76b2a71e", + "ssh_username": "ubuntu", + "force_deregister": "%s", + "ami_name": "packer-test-%s" + }] +} +` + +func buildForceDeregisterConfig(name, flag string) string { + return fmt.Sprintf(testBuilderAccForceDeregister, name, flag) +} diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 91355b913..385544d61 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -167,6 +167,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ + &awscommon.StepPreValidate{ + DestAmiName: b.config.AMIName, + ForceDeregister: b.config.AMIForceDeregister, + }, &awscommon.StepSourceAMIInfo{ SourceAmi: b.config.SourceAmi, EnhancedNetworking: b.config.AMIEnhancedNetworking, @@ -211,6 +215,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepUploadBundle{ Debug: b.config.PackerDebug, }, + &awscommon.StepDeregisterAMI{ + ForceDeregister: b.config.AMIForceDeregister, + AMIName: b.config.AMIName, + }, &StepRegisterAMI{}, &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, diff --git a/helper/builder/testing/testing.go b/helper/builder/testing/testing.go index 29200e108..522d7a265 100644 --- a/helper/builder/testing/testing.go +++ b/helper/builder/testing/testing.go @@ -41,6 +41,10 @@ type TestCase struct { // in the case that the test can't guarantee all resources were // properly cleaned up. Teardown TestTeardownFunc + + // If SkipArtifactTeardown is true, we will not attempt to destroy the + // artifact created in this test run. + SkipArtifactTeardown bool } // TestCheckFunc is the callback used for Check in TestStep. @@ -163,12 +167,14 @@ func Test(t TestT, c TestCase) { } TEARDOWN: - // Delete all artifacts - for _, a := range artifacts { - if err := a.Destroy(); err != nil { - t.Error(fmt.Sprintf( - "!!! ERROR REMOVING ARTIFACT '%s': %s !!!", - a.String(), err)) + if !c.SkipArtifactTeardown { + // Delete all artifacts + for _, a := range artifacts { + if err := a.Destroy(); err != nil { + t.Error(fmt.Sprintf( + "!!! ERROR REMOVING ARTIFACT '%s': %s !!!", + a.String(), err)) + } } } From 2c683c50573982d2bb0cb3be745b9f52140b2010 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 14:28:27 -0400 Subject: [PATCH 323/956] builder/openstack: prioritize ssh interfaces --- builder/openstack/ssh.go | 82 ++++++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 33 deletions(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 519ccd406..65e057084 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -21,6 +21,13 @@ func SSHAddress( return func(state multistep.StateBag) (string, error) { s := state.Get("server").(*servers.Server) + // If we have a specific interface, try that + if sshinterface != "" { + if addr := sshAddrFromPool(s, sshinterface, port); addr != "" { + return addr, nil + } + } + // If we have a floating IP, use that ip := state.Get("access_ip").(*floatingip.FloatingIP) if ip != nil && ip.IP != "" { @@ -31,39 +38,9 @@ func SSHAddress( return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil } - // Get all the addresses associated with this server. This - // was taken directly from Terraform. 
- for pool, networkAddresses := range s.Addresses { - // If we have an SSH interface specified, skip it if no match - if sshinterface != "" && pool != sshinterface { - log.Printf( - "[INFO] Skipping pool %s, doesn't match requested %s", - pool, sshinterface) - continue - } - - elements, ok := networkAddresses.([]interface{}) - if !ok { - log.Printf( - "[ERROR] Unknown return type for address field: %#v", - networkAddresses) - continue - } - - for _, element := range elements { - var addr string - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" { - addr = address["addr"].(string) - } else { - if address["version"].(float64) == 4 { - addr = address["addr"].(string) - } - } - if addr != "" { - return fmt.Sprintf("%s:%d", addr, port), nil - } - } + // Try to get it from the requested interface + if addr := sshAddrFromPool(s, sshinterface, port); addr != "" { + return addr, nil } s, err := servers.Get(client, s.ID).Extract() @@ -98,3 +75,42 @@ func SSHConfig(username string) func(multistep.StateBag) (*ssh.ClientConfig, err }, nil } } + +func sshAddrFromPool(s *servers.Server, desired string, port int) string { + // Get all the addresses associated with this server. This + // was taken directly from Terraform. + for pool, networkAddresses := range s.Addresses { + // If we have an SSH interface specified, skip it if no match + if desired != "" && pool != desired { + log.Printf( + "[INFO] Skipping pool %s, doesn't match requested %s", + pool, desired) + continue + } + + elements, ok := networkAddresses.([]interface{}) + if !ok { + log.Printf( + "[ERROR] Unknown return type for address field: %#v", + networkAddresses) + continue + } + + for _, element := range elements { + var addr string + address := element.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "floating" { + addr = address["addr"].(string) + } else { + if address["version"].(float64) == 4 { + addr = address["addr"].(string) + } + } + if addr != "" { + return fmt.Sprintf("%s:%d", addr, port) + } + } + } + + return "" +} From a3863c3495db25eed06da1a2a5741f31dd9c8935 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 15:23:05 -0400 Subject: [PATCH 324/956] builder/openstack: update floating IP messaging --- builder/openstack/step_allocate_ip.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/builder/openstack/step_allocate_ip.go b/builder/openstack/step_allocate_ip.go index 0ab9b3529..fc386082b 100644 --- a/builder/openstack/step_allocate_ip.go +++ b/builder/openstack/step_allocate_ip.go @@ -36,6 +36,8 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { if s.FloatingIp != "" { instanceIp.IP = s.FloatingIp } else if s.FloatingIpPool != "" { + ui.Say(fmt.Sprintf("Creating floating IP...")) + ui.Message(fmt.Sprintf("Pool: %s", s.FloatingIpPool)) newIp, err := floatingip.Create(client, floatingip.CreateOpts{ Pool: s.FloatingIpPool, }).Extract() @@ -47,22 +49,24 @@ func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction { } instanceIp = *newIp - ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.IP)) + ui.Message(fmt.Sprintf("Created floating IP: %s", instanceIp.IP)) } if instanceIp.IP != "" { + ui.Say(fmt.Sprintf("Associating floating IP with server...")) + ui.Message(fmt.Sprintf("IP: %s", instanceIp.IP)) err := floatingip.Associate(client, server.ID, instanceIp.IP).ExtractErr() if err != nil { err := fmt.Errorf( - "Error associating floating IP %s with instance.", - 
instanceIp.IP) + "Error associating floating IP %s with instance: %s", + instanceIp.IP, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - ui.Say(fmt.Sprintf( - "Added floating IP %s to instance...", instanceIp.IP)) + ui.Message(fmt.Sprintf( + "Added floating IP %s to instance!", instanceIp.IP)) } state.Put("access_ip", &instanceIp) From f398352996bc5e696ee17c47460099c3fcbd0ffb Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 12 Jun 2015 14:00:59 -0700 Subject: [PATCH 325/956] Fix a bug where interpolation was broken in some builders --- builder/googlecompute/config.go | 2 +- builder/null/config.go | 2 +- builder/parallels/pvm/config.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 743e4745c..69d223a75 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -47,7 +47,7 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - err := config.Decode(&c, &config.DecodeOpts{ + err := config.Decode(c, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ diff --git a/builder/null/config.go b/builder/null/config.go index 7665aec51..9ccc32282 100644 --- a/builder/null/config.go +++ b/builder/null/config.go @@ -21,7 +21,7 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - err := config.Decode(&c, &config.DecodeOpts{ + err := config.Decode(c, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index c3fab4446..8f6d9a915 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -33,7 +33,7 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) - err := config.Decode(&c, &config.DecodeOpts{ + err := config.Decode(c, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ From 16320372d64adacf1c172af0535fe56114d3b611 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 12 Jun 2015 14:02:09 -0700 Subject: [PATCH 326/956] Make some builder config usage more consistent with other builders --- builder/digitalocean/config.go | 6 +++--- builder/docker/config.go | 6 +++--- builder/virtualbox/ovf/config.go | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index a19dabda2..178b54049 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -42,10 +42,10 @@ type Config struct { } func NewConfig(raws ...interface{}) (*Config, []string, error) { - var c Config + c := new(Config) var md mapstructure.Metadata - err := config.Decode(&c, &config.DecodeOpts{ + err := config.Decode(c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ @@ -142,5 +142,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } common.ScrubConfig(c, c.APIToken) - return &c, nil, nil + return c, nil, nil } diff --git a/builder/docker/config.go b/builder/docker/config.go index 024b915af..d5801c8ba 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -31,10 +31,10 @@ type Config struct { } func NewConfig(raws ...interface{}) (*Config, []string, error) { - var c Config + c := new(Config) var 
md mapstructure.Metadata - err := config.Decode(&c, &config.DecodeOpts{ + err := config.Decode(c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ @@ -91,5 +91,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { return nil, nil, errs } - return &c, nil, nil + return c, nil, nil } diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go index de60bcc7b..837c3f37a 100644 --- a/builder/virtualbox/ovf/config.go +++ b/builder/virtualbox/ovf/config.go @@ -40,8 +40,8 @@ type Config struct { } func NewConfig(raws ...interface{}) (*Config, []string, error) { - var c Config - err := config.Decode(&c, &config.DecodeOpts{ + c := new(Config) + err := config.Decode(c, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ @@ -132,5 +132,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.ImportFlags = append(c.ImportFlags, "--options", c.ImportOpts) } - return &c, warnings, nil + return c, warnings, nil } From 8b4f980123bbb09d0a4a0b404d41070db17c8fe4 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 12 Jun 2015 15:00:53 -0700 Subject: [PATCH 327/956] website: fix missing comma in parallels-pvm example --- website/source/docs/builders/parallels-pvm.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index 355e325c5..434bda8e7 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -27,7 +27,7 @@ the settings here. ```javascript { "type": "parallels-pvm", - "parallels_tools_flavor": "lin" + "parallels_tools_flavor": "lin", "source_path": "source.pvm", "ssh_username": "packer", "ssh_password": "packer", From 48b674d331c22d1dc75ea80fb8605acf5a3c3a7c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 18:04:46 -0400 Subject: [PATCH 328/956] builder/openstack: load extensions, stop server if supported --- builder/openstack/builder.go | 2 + builder/openstack/step_load_extensions.go | 58 +++++++++++++++++++++++ builder/openstack/step_stop_server.go | 58 +++++++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 builder/openstack/step_load_extensions.go create mode 100644 builder/openstack/step_stop_server.go diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index ab60afc0e..2256ad80e 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -67,6 +67,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe // Build the steps steps := []multistep.Step{ + &StepLoadExtensions{}, &StepLoadFlavor{ Flavor: b.config.Flavor, }, @@ -94,6 +95,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe SSHWaitTimeout: b.config.SSHTimeout(), }, &common.StepProvision{}, + &StepStopServer{}, &stepCreateImage{}, } diff --git a/builder/openstack/step_load_extensions.go b/builder/openstack/step_load_extensions.go new file mode 100644 index 000000000..095863612 --- /dev/null +++ b/builder/openstack/step_load_extensions.go @@ -0,0 +1,58 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions" + "github.com/rackspace/gophercloud/pagination" +) + +// StepLoadExtensions gets the 
set of extensions enabled on the
+// OpenStack cluster and stores them in the state bag so that later steps
+// can check for optional capabilities such as server start/stop.
+type StepLoadExtensions struct{}
+
+func (s *StepLoadExtensions) Run(state multistep.StateBag) multistep.StepAction {
+	config := state.Get("config").(Config)
+	ui := state.Get("ui").(packer.Ui)
+
+	// We need the v2 compute client
+	client, err := config.computeV2Client()
+	if err != nil {
+		err = fmt.Errorf("Error initializing compute client: %s", err)
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	ui.Say("Discovering enabled extensions...")
+	result := make(map[string]struct{}, 15)
+	pager := extensions.List(client)
+	err = pager.EachPage(func(p pagination.Page) (bool, error) {
+		// Extract the extensions from this page
+		exts, err := extensions.ExtractExtensions(p)
+		if err != nil {
+			return false, err
+		}
+
+		for _, ext := range exts {
+			log.Printf("[DEBUG] Discovered extension: %s", ext.Alias)
+			result[ext.Alias] = struct{}{}
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		err = fmt.Errorf("Error loading extensions: %s", err)
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	state.Put("extensions", result)
+	return multistep.ActionContinue
+}
+
+func (s *StepLoadExtensions) Cleanup(state multistep.StateBag) {
+}
diff --git a/builder/openstack/step_stop_server.go b/builder/openstack/step_stop_server.go
new file mode 100644
index 000000000..9b83fd89b
--- /dev/null
+++ b/builder/openstack/step_stop_server.go
@@ -0,0 +1,58 @@
+package openstack
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+)
+
+type StepStopServer struct{}
+
+func (s *StepStopServer) Run(state multistep.StateBag) multistep.StepAction {
+	ui := state.Get("ui").(packer.Ui)
+	config := state.Get("config").(Config)
+	extensions := state.Get("extensions").(map[string]struct{})
+	server := state.Get("server").(*servers.Server)
+
+	// Verify we have the extension
+	if _, ok := extensions["os-server-start-stop"]; !ok {
+		ui.Say("OpenStack cluster doesn't support stop, skipping...")
+		return multistep.ActionContinue
+	}
+
+	// We need the v2 compute client
+	client, err := config.computeV2Client()
+	if err != nil {
+		err = fmt.Errorf("Error initializing compute client: %s", err)
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	ui.Say("Stopping server...")
+	if err := startstop.Stop(client, server.ID).ExtractErr(); err != nil {
+		err = fmt.Errorf("Error stopping server: %s", err)
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	ui.Message("Waiting for server to stop...")
+	stateChange := StateChangeConf{
+		Pending:   []string{"ACTIVE"},
+		Target:    "STOPPED",
+		Refresh:   ServerStateRefreshFunc(client, server),
+		StepState: state,
+	}
+	if _, err := WaitForState(&stateChange); err != nil {
+		err := fmt.Errorf("Error waiting for server (%s) to stop: %s", server.ID, err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
+	return multistep.ActionContinue
+}
+
+func (s *StepStopServer) Cleanup(state multistep.StateBag) {}
From 64fd3a3302c04868065edd5a874010a53e28f7a2 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 12 Jun 2015 17:24:03 -0700
Subject: [PATCH 329/956] Added file builder as a cheap, fast way to build
 something with output for testing post-processors

---
 builder/file/artifact.go      | 36 ++++++++++++++++++++++++
 builder/file/artifact_test.go | 11 ++++++++
 builder/file/builder.go       | 53 +++++++++++++++++++++++++++++++++++
 builder/file/builder_test.go  | 11 ++++++++
 builder/file/config.go        | 48 +++++++++++++++++++++++++++++++
 builder/file/config_test.go   | 35 +++++++++++++++++++++++
 6 files changed, 194 insertions(+)
 create mode 100644 builder/file/artifact.go
 create mode 100644 builder/file/artifact_test.go
 create mode 100644 builder/file/builder.go
 create mode 100644 builder/file/builder_test.go
 create mode 100644 builder/file/config.go
 create mode 100644 builder/file/config_test.go

diff --git a/builder/file/artifact.go b/builder/file/artifact.go
new file mode 100644
index 000000000..35bf06e6c
--- /dev/null
+++ b/builder/file/artifact.go
@@ -0,0 +1,36 @@
+package file
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+type FileArtifact struct {
+	filename string
+}
+
+func (*FileArtifact) BuilderId() string {
+	return BuilderId
+}
+
+func (a *FileArtifact) Files() []string {
+	return []string{a.filename}
+}
+
+func (a *FileArtifact) Id() string {
+	return "File"
+}
+
+func (a *FileArtifact) String() string {
+	return fmt.Sprintf("Stored file: %s", a.filename)
+}
+
+func (a *FileArtifact) State(name string) interface{} {
+	return nil
+}
+
+func (a *FileArtifact) Destroy() error {
+	log.Printf("Deleting %s", a.filename)
+	return os.Remove(a.filename)
+}
diff --git a/builder/file/artifact_test.go b/builder/file/artifact_test.go
new file mode 100644
index 000000000..0aa77894b
--- /dev/null
+++ b/builder/file/artifact_test.go
@@ -0,0 +1,11 @@
+package file
+
+import (
+	"testing"
+
+	"github.com/mitchellh/packer/packer"
+)
+
+func TestFileArtifact(t *testing.T) {
+	var _ packer.Artifact = new(FileArtifact)
+}
diff --git a/builder/file/builder.go b/builder/file/builder.go
new file mode 100644
index 000000000..89047ab75
--- /dev/null
+++ b/builder/file/builder.go
@@ -0,0 +1,53 @@
+package file
+
+/*
+The File builder creates an artifact from a file. Because it does not require
+any virtualization or network resources, it's very fast and useful for testing.
+*/
+
+import (
+	"io/ioutil"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+)
+
+const BuilderId = "cbednarski.file"
+
+type Builder struct {
+	config *Config
+	runner multistep.Runner
+}
+
+// Prepare is responsible for configuring the builder and validating
+// that configuration. Any setup should be done in this method. Note that
+// NO side effects should take place in prepare, it is meant as a state
+// setup only. Calling Prepare is not necessarily followed by a Run.
+//
+// The parameters to Prepare are a set of interface{} values of the
+// configuration. These are almost always `map[string]interface{}`
+// parsed from a template, but no guarantee is made.
+//
+// Each of the configuration values should merge into the final
+// configuration.
+//
+// Prepare should return a list of warnings along with any errors
+// that occurred while preparing.
+func (b *Builder) Prepare(...interface{}) ([]string, error) {
+	return nil, nil
+}
+
+// Run is where the actual build should take place. It takes a Build and a Ui.
+func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
+	artifact := new(FileArtifact)
+
+	ioutil.WriteFile(b.config.Filename, []byte(b.config.Contents), 0600)
+
+	return artifact, nil
+}
+
+// Cancel cancels a possibly running Builder. This should block until
+// the builder actually cancels and cleans up after itself.
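+//
+// Note: nothing in this builder ever assigns b.runner, so the delegation
+// below assumes Cancel is only invoked while a build is in flight; calling
+// it on a fresh Builder would call Cancel on a nil multistep.Runner.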
+func (b *Builder) Cancel() { + b.runner.Cancel() +} diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go new file mode 100644 index 000000000..63d36a0a5 --- /dev/null +++ b/builder/file/builder_test.go @@ -0,0 +1,11 @@ +package file + +import ( + "testing" + + "github.com/mitchellh/packer/packer" +) + +func TestBuilder_implBuilder(t *testing.T) { + var _ packer.Builder = new(Builder) +} diff --git a/builder/file/config.go b/builder/file/config.go new file mode 100644 index 000000000..534428ca4 --- /dev/null +++ b/builder/file/config.go @@ -0,0 +1,48 @@ +package file + +import ( + "fmt" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Filename string `mapstructure:"filename"` + Contents string `mapstructure:"contents"` +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + warnings := []string{} + + err := config.Decode(c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) + if err != nil { + return nil, warnings, err + } + + var errs *packer.MultiError + + if c.Filename == "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("filename is required")) + } + + if c.Contents == "" { + warnings = append(warnings, "contents is empty") + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, warnings, errs + } + + return c, warnings, nil +} diff --git a/builder/file/config_test.go b/builder/file/config_test.go new file mode 100644 index 000000000..061bb97e5 --- /dev/null +++ b/builder/file/config_test.go @@ -0,0 +1,35 @@ +package file + +import ( + "fmt" + "testing" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "filename": "test.txt", + "contents": "Hello, world!", + } +} + +func TestNoFilename(t *testing.T) { + raw := testConfig() + + delete(raw, "filename") + _, _, errs := NewConfig(raw) + if errs == nil { + t.Error("Expected config to error without a filename") + } +} + +func TestNoContent(t *testing.T) { + raw := testConfig() + + delete(raw, "contents") + _, warns, _ := NewConfig(raw) + fmt.Println(len(warns)) + fmt.Printf("%#v\n", warns) + if len(warns) == 0 { + t.Error("Expected config to warn without any content") + } +} From 53e4688529c75c632cafe8c43a5c0783b2c2dd1e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 17:25:09 -0700 Subject: [PATCH 330/956] Renamed some things to be more consistent with existing modules --- post-processor/compress/artifact.go | 25 ++++-- post-processor/compress/post-processor.go | 103 ++++++++++++---------- 2 files changed, 71 insertions(+), 57 deletions(-) diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index 054d501d1..cfc914a55 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -8,25 +8,32 @@ import ( const BuilderId = "packer.post-processor.compress" type Artifact struct { - builderId string - dir string - f []string + Path string + Provider string + files []string +} + +func NewArtifact(provider, path string) *Artifact { + return &Artifact{ + Path: path, + Provider: provider, + } } func (a *Artifact) BuilderId() string { return BuilderId } -func (a *Artifact) Files() []string { - return a.f +func (*Artifact) Id() string { + return "" } -func (*Artifact) Id() string { - 
return "COMPRESS" +func (a *Artifact) Files() []string { + return a.files } func (a *Artifact) String() string { - return fmt.Sprintf("VM compressed files in directory: %s", a.dir) + return fmt.Sprintf("'%s' compressing: %s", a.Provider, a.Path) } func (*Artifact) State(name string) interface{} { @@ -34,5 +41,5 @@ func (*Artifact) State(name string) interface{} { } func (a *Artifact) Destroy() error { - return os.RemoveAll(a.dir) + return os.Remove(a.Path) } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 6d28e7c0e..340a75dd4 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -46,47 +46,45 @@ type Config struct { } type PostProcessor struct { - cfg Config + config Config } func (p *PostProcessor) Configure(raws ...interface{}) error { - p.cfg.Compression = -1 - err := config.Decode(&p.cfg, &config.DecodeOpts{ + p.config.Compression = -1 + err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - // TODO figure out if something needs to go here. - }, + Exclude: []string{}, }, }, raws...) errs := new(packer.MultiError) - if p.cfg.OutputPath == "" { - p.cfg.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" + if p.config.OutputPath == "" { + p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" } - if err = interpolate.Validate(p.cfg.OutputPath, p.cfg.ctx); err != nil { + if err = interpolate.Validate(p.config.OutputPath, p.config.ctx); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing target template: %s", err)) } templates := map[string]*string{ - "output": &p.cfg.OutputPath, + "output": &p.config.OutputPath, } - if p.cfg.Compression > flate.BestCompression { - p.cfg.Compression = flate.BestCompression + if p.config.Compression > flate.BestCompression { + p.config.Compression = flate.BestCompression } - if p.cfg.Compression == -1 { - p.cfg.Compression = flate.DefaultCompression + if p.config.Compression == -1 { + p.config.Compression = flate.DefaultCompression } - if p.cfg.NumCPU < 1 { - p.cfg.NumCPU = runtime.NumCPU() + if p.config.NumCPU < 1 { + p.config.NumCPU = runtime.NumCPU() } - runtime.GOMAXPROCS(p.cfg.NumCPU) + runtime.GOMAXPROCS(p.config.NumCPU) for key, ptr := range templates { if *ptr == "" { @@ -94,7 +92,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { errs, fmt.Errorf("%s must be set", key)) } - *ptr, err = interpolate.Render(p.cfg.OutputPath, p.cfg.ctx) + *ptr, err = interpolate.Render(p.config.OutputPath, p.config.ctx) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", key, err)) @@ -114,7 +112,7 @@ func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata const layout = "2006-01-02_15-04-05" t := time.Now() - if !p.cfg.Metadata { + if !p.config.Metadata { return metadata } for _, f := range files { @@ -122,7 +120,7 @@ func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata continue } else { if i, ok := metadata[filepath.Base(f)]; !ok { - metadata[filepath.Base(f)] = Metaitem{CompType: p.cfg.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} + metadata[filepath.Base(f)] = Metaitem{CompType: p.config.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} } else { i.CompSize = fi.Size() i.CompDate = t.Format(layout) @@ -134,46 +132,55 @@ func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata } func (p 
*PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - newartifact := &Artifact{builderId: artifact.BuilderId(), dir: p.cfg.OutputPath} - metafile := filepath.Join(p.cfg.OutputPath, "metadata") + newartifact := &Artifact{Path: p.config.OutputPath} + metafile := filepath.Join(p.config.OutputPath, "metadata") - _, err := os.Stat(newartifact.dir) + ui.Say(fmt.Sprintf("[CBEDNARSKI] Creating archive at %s", newartifact.Path)) + _, err := os.Stat(newartifact.Path) if err == nil { - return nil, false, fmt.Errorf("output dir must not exists: %s", err) + return nil, false, fmt.Errorf("output dir %s must not exists", newartifact.Path) } - err = os.MkdirAll(newartifact.dir, 0755) + err = os.MkdirAll(newartifact.Path, 0755) if err != nil { return nil, false, fmt.Errorf("failed to create output: %s", err) } - formats := strings.Split(p.cfg.Format, ".") + p.config.Format += "tar.gzip" + formats := strings.Split(p.config.Format, ".") + ui.Say(fmt.Sprintf("[CBEDNARSKI] Formats length %d", len(formats))) + if len(p.config.Format) == 0 { + ui.Say("[CBEDNARSKI] Formats is empty") + formats[0] = "tar.gzip" + } files := artifact.Files() metadata := make(Metadata, 0) metadata = p.fillMetadata(metadata, files) + ui.Say(fmt.Sprintf("[CBEDNARSKI] Formats %#v", formats)) + for _, compress := range formats { switch compress { case "tar": - files, err = p.cmpTAR(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + files, err = p.cmpTAR(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) metadata = p.fillMetadata(metadata, files) case "zip": - files, err = p.cmpZIP(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + files, err = p.cmpZIP(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) metadata = p.fillMetadata(metadata, files) case "pgzip": - files, err = p.cmpPGZIP(files, p.cfg.OutputPath) + files, err = p.cmpPGZIP(files, p.config.OutputPath) metadata = p.fillMetadata(metadata, files) case "gzip": - files, err = p.cmpGZIP(files, p.cfg.OutputPath) + files, err = p.cmpGZIP(files, p.config.OutputPath) metadata = p.fillMetadata(metadata, files) case "bgzf": - files, err = p.cmpBGZF(files, p.cfg.OutputPath) + files, err = p.cmpBGZF(files, p.config.OutputPath) metadata = p.fillMetadata(metadata, files) case "lz4": - files, err = p.cmpLZ4(files, p.cfg.OutputPath) + files, err = p.cmpLZ4(files, p.config.OutputPath) metadata = p.fillMetadata(metadata, files) case "e2fs": - files, err = p.cmpE2FS(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) + files, err = p.cmpE2FS(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) metadata = p.fillMetadata(metadata, files) } if err != nil { @@ -181,7 +188,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } } - if p.cfg.Metadata { + if p.config.Metadata { fp, err := os.Create(metafile) if err != nil { return nil, false, err @@ -198,18 +205,18 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } } - newartifact.f = append(newartifact.f, files...) - if p.cfg.Metadata { - newartifact.f = append(newartifact.f, metafile) + newartifact.files = append(newartifact.files, files...) 
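+	// When metadata generation is enabled, list the metadata file as part of
+	// the artifact so it is reported and destroyed together with the archives.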
+ if p.config.Metadata { + newartifact.files = append(newartifact.files, metafile) } - return newartifact, p.cfg.KeepInputArtifact, nil + return newartifact, p.config.KeepInputArtifact, nil } func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { fw, err := os.Create(dst) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error creating tar %s: %s", dst, err) } defer fw.Close() @@ -219,27 +226,27 @@ func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { for _, name := range src { fi, err := os.Stat(name) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error on stat of %s: %s", name, err) } target, _ := os.Readlink(name) header, err := tar.FileInfoHeader(fi, target) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error reading info for %s: %s", name, err) } if err = tw.WriteHeader(header); err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error writing header for %s: %s", name, err) } fr, err := os.Open(name) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error opening file %s: %s", name, err) } if _, err = io.Copy(tw, fr); err != nil { fr.Close() - return nil, fmt.Errorf("tar error: %s", err) + return nil, fmt.Errorf("tar error copying contents of %s: %s", name, err) } fr.Close() } @@ -254,7 +261,7 @@ func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { if err != nil { return nil, fmt.Errorf("gzip error: %s", err) } - cw, err := gzip.NewWriterLevel(fw, p.cfg.Compression) + cw, err := gzip.NewWriterLevel(fw, p.config.Compression) if err != nil { fw.Close() return nil, fmt.Errorf("gzip error: %s", err) @@ -287,7 +294,7 @@ func (p *PostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { if err != nil { return nil, fmt.Errorf("pgzip error: %s", err) } - cw, err := pgzip.NewWriterLevel(fw, p.cfg.Compression) + cw, err := pgzip.NewWriterLevel(fw, p.config.Compression) if err != nil { fw.Close() return nil, fmt.Errorf("pgzip error: %s", err) @@ -325,7 +332,7 @@ func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { fw.Close() return nil, fmt.Errorf("lz4 error: %s", err) } - if p.cfg.Compression > flate.DefaultCompression { + if p.config.Compression > flate.DefaultCompression { cw.Header.HighCompression = true } fr, err := os.Open(name) @@ -357,7 +364,7 @@ func (p *PostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { return nil, fmt.Errorf("bgzf error: %s", err) } - cw, err := bgzf.NewWriterLevel(fw, p.cfg.Compression, runtime.NumCPU()) + cw, err := bgzf.NewWriterLevel(fw, p.config.Compression, runtime.NumCPU()) if err != nil { return nil, fmt.Errorf("bgzf error: %s", err) } From 766d217ed71f511b12a30e13efa829bcf3b05b23 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 17:34:46 -0700 Subject: [PATCH 331/956] Pull config into the builder --- builder/file/builder.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/builder/file/builder.go b/builder/file/builder.go index 89047ab75..3b00aae60 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -33,8 +33,14 @@ type Builder struct { // // Prepare should return a list of warnings along with any errors // that occured while preparing. 
-func (b *Builder) Prepare(...interface{}) ([]string, error) {
-	return nil, nil
+func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
+	c, warnings, errs := NewConfig(raws...)
+	if errs != nil {
+		return warnings, errs
+	}
+	b.config = c
+
+	return warnings, nil
 }
 
 // Run is where the actual build should take place. It takes a Build and a Ui.
From f7d85eb49cf169f1dfdf66d20320032ff370fff6 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 12 Jun 2015 17:35:17 -0700
Subject: [PATCH 332/956] Add main() for file builder

---
 plugin/builder-file/main.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 plugin/builder-file/main.go

diff --git a/plugin/builder-file/main.go b/plugin/builder-file/main.go
new file mode 100644
index 000000000..54bc4f437
--- /dev/null
+++ b/plugin/builder-file/main.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+	"github.com/mitchellh/packer/builder/file"
+	"github.com/mitchellh/packer/packer/plugin"
+)
+
+func main() {
+	server, err := plugin.Server()
+	if err != nil {
+		panic(err)
+	}
+	server.RegisterBuilder(new(file.Builder))
+	server.Serve()
+}
From e60b22d48f8b557c620ff083a55263acedc19d55 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 12 Jun 2015 18:18:38 -0700
Subject: [PATCH 333/956] Changed file builder to support content or source
 file operation

---
 builder/file/builder.go     | 33 ++++++++++++++++++++++++++++++++-
 builder/file/config.go      | 20 ++++++++++++++------
 builder/file/config_test.go | 22 +++++++++++++++++-----
 3 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/builder/file/builder.go b/builder/file/builder.go
index 3b00aae60..ea3206dad 100644
--- a/builder/file/builder.go
+++ b/builder/file/builder.go
@@ -6,7 +6,10 @@ any virtualization or network resources, it's very fast and useful for testing.
 */
 
 import (
+	"fmt"
+	"io"
 	"io/ioutil"
+	"os"
 
 	"github.com/mitchellh/multistep"
 	"github.com/mitchellh/packer/packer"
@@ -47,7 +50,35 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
 	artifact := new(FileArtifact)
 
-	ioutil.WriteFile(b.config.Filename, []byte(b.config.Contents), 0600)
+	if b.config.Source != "" {
+		source, err := os.Open(b.config.Source)
+		if err != nil {
+			return nil, err
+		}
+		defer source.Close()
+
+		target, err := os.OpenFile(b.config.Target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+		if err != nil {
+			return nil, err
+		}
+		defer target.Close()
+
+		ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name()))
+		bytes, err := io.Copy(target, source)
+		if err != nil {
+			return nil, err
+		}
+		ui.Say(fmt.Sprintf("Copied %d bytes", bytes))
+		artifact.filename = target.Name()
+	} else {
+		// We're going to write Contents; if it's empty we'll just create an
+		// empty file.
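+		// ioutil.WriteFile creates the target (mode 0600) if it does not
+		// exist and truncates it if it does, so no separate open is needed.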
+ err := ioutil.WriteFile(b.config.Target, []byte(b.config.Content), 0600) + if err != nil { + return nil, err + } + artifact.filename = b.config.Target + } return artifact, nil } diff --git a/builder/file/config.go b/builder/file/config.go index 534428ca4..6702e6894 100644 --- a/builder/file/config.go +++ b/builder/file/config.go @@ -9,11 +9,15 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) +var ErrTargetRequired = fmt.Errorf("target required") +var ErrContentSourceConflict = fmt.Errorf("Cannot specify source file AND content") + type Config struct { common.PackerConfig `mapstructure:",squash"` - Filename string `mapstructure:"filename"` - Contents string `mapstructure:"contents"` + Source string `mapstructure:"source"` + Target string `mapstructure:"target"` + Content string `mapstructure:"content"` } func NewConfig(raws ...interface{}) (*Config, []string, error) { @@ -32,12 +36,16 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { var errs *packer.MultiError - if c.Filename == "" { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("filename is required")) + if c.Target == "" { + errs = packer.MultiErrorAppend(errs, ErrTargetRequired) } - if c.Contents == "" { - warnings = append(warnings, "contents is empty") + if c.Content == "" && c.Source == "" { + warnings = append(warnings, "Both source file and contents are blank; target will have no content") + } + + if c.Content != "" && c.Source != "" { + errs = packer.MultiErrorAppend(errs, ErrContentSourceConflict) } if errs != nil && len(errs.Errors) > 0 { diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 061bb97e5..6d8039558 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -2,13 +2,24 @@ package file import ( "fmt" + "strings" "testing" ) func testConfig() map[string]interface{} { return map[string]interface{}{ - "filename": "test.txt", - "contents": "Hello, world!", + "source": "src.txt", + "target": "dst.txt", + "content": "Hello, world!", + } +} + +func TestContentSourceConflict(t *testing.T) { + raw := testConfig() + + _, _, errs := NewConfig(raw) + if !strings.Contains(errs.Error(), ErrContentSourceConflict.Error()) { + t.Errorf("Expected config error: %s", ErrContentSourceConflict.Error()) } } @@ -18,18 +29,19 @@ func TestNoFilename(t *testing.T) { delete(raw, "filename") _, _, errs := NewConfig(raw) if errs == nil { - t.Error("Expected config to error without a filename") + t.Errorf("Expected config error: %s", ErrTargetRequired.Error()) } } func TestNoContent(t *testing.T) { raw := testConfig() - delete(raw, "contents") + delete(raw, "content") + delete(raw, "source") _, warns, _ := NewConfig(raw) fmt.Println(len(warns)) fmt.Printf("%#v\n", warns) if len(warns) == 0 { - t.Error("Expected config to warn without any content") + t.Error("Expected config warning without any content") } } From b6d6a71c6e47b3e99e84c749372c11f227371df6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 22:50:59 -0400 Subject: [PATCH 334/956] builder/openstack: wait for more states --- builder/openstack/server.go | 8 +++++--- builder/openstack/step_run_source_server.go | 6 +++--- builder/openstack/step_stop_server.go | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/builder/openstack/server.go b/builder/openstack/server.go index de8c9d103..482657c03 100644 --- a/builder/openstack/server.go +++ b/builder/openstack/server.go @@ -28,7 +28,7 @@ type StateChangeConf struct { Pending []string Refresh StateRefreshFunc StepState 
multistep.StateBag - Target string + Target []string } // ServerStateRefreshFunc returns a StateRefreshFunc that is used to watch @@ -65,8 +65,10 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) { return } - if currentState == conf.Target { - return + for _, t := range conf.Target { + if currentState == t { + return + } } if conf.StepState != nil { diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go index 4014f1d95..596348def 100644 --- a/builder/openstack/step_run_source_server.go +++ b/builder/openstack/step_run_source_server.go @@ -65,7 +65,7 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction ui.Say("Waiting for server to become ready...") stateChange := StateChangeConf{ Pending: []string{"BUILD"}, - Target: "ACTIVE", + Target: []string{"ACTIVE"}, Refresh: ServerStateRefreshFunc(computeClient, s.server), StepState: state, } @@ -105,9 +105,9 @@ func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) { } stateChange := StateChangeConf{ - Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED"}, + Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED", "SHUTOFF", "STOPPED"}, Refresh: ServerStateRefreshFunc(computeClient, s.server), - Target: "DELETED", + Target: []string{"DELETED"}, } WaitForState(&stateChange) diff --git a/builder/openstack/step_stop_server.go b/builder/openstack/step_stop_server.go index 9b83fd89b..298d0bc0a 100644 --- a/builder/openstack/step_stop_server.go +++ b/builder/openstack/step_stop_server.go @@ -41,7 +41,7 @@ func (s *StepStopServer) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Waiting for server to stop...") stateChange := StateChangeConf{ Pending: []string{"ACTIVE"}, - Target: "STOPPED", + Target: []string{"SHUTOFF", "STOPPED"}, Refresh: ServerStateRefreshFunc(client, server), StepState: state, } From b3a97124023b48a36d05d2aa8fc1fa0def0ad0c9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 12 Jun 2015 22:55:39 -0400 Subject: [PATCH 335/956] builder/openstack: support user data [GH-1867] --- builder/openstack/builder.go | 2 ++ builder/openstack/run_config.go | 2 ++ builder/openstack/step_run_source_server.go | 14 ++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index ab60afc0e..cf1cfc8c5 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -80,6 +80,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe SecurityGroups: b.config.SecurityGroups, Networks: b.config.Networks, AvailabilityZone: b.config.AvailabilityZone, + UserData: b.config.UserData, + UserDataFile: b.config.UserDataFile, }, &StepWaitForRackConnect{ Wait: b.config.RackconnectWait, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 4a6a1b81f..ca0360a10 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -23,6 +23,8 @@ type RunConfig struct { FloatingIp string `mapstructure:"floating_ip"` SecurityGroups []string `mapstructure:"security_groups"` Networks []string `mapstructure:"networks"` + UserData string `mapstructure:"user_data"` + UserDataFile string `mapstructure:"user_data_file"` // Not really used, but here for BC OpenstackProvider string `mapstructure:"openstack_provider"` diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go index 4014f1d95..9e55dea0e 100644 --- 
a/builder/openstack/step_run_source_server.go
+++ b/builder/openstack/step_run_source_server.go
@@ -2,6 +2,7 @@ package openstack
 
 import (
 	"fmt"
+	"io/ioutil"
 	"log"
 
 	"github.com/mitchellh/multistep"
@@ -16,6 +17,8 @@ type StepRunSourceServer struct {
 	SecurityGroups   []string
 	Networks         []string
 	AvailabilityZone string
+	UserData         string
+	UserDataFile     string
 
 	server *servers.Server
 }
@@ -39,6 +42,16 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction
 		networks[i].UUID = networkUuid
 	}
 
+	userData := []byte(s.UserData)
+	if s.UserDataFile != "" {
+		userData, err = ioutil.ReadFile(s.UserDataFile)
+		if err != nil {
+			err = fmt.Errorf("Error reading user data file: %s", err)
+			state.Put("error", err)
+			return multistep.ActionHalt
+		}
+	}
+
 	ui.Say("Launching server...")
 	s.server, err = servers.Create(computeClient, keypairs.CreateOptsExt{
 		CreateOptsBuilder: servers.CreateOpts{
@@ -48,6 +61,7 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction
 			SecurityGroups:   s.SecurityGroups,
 			Networks:         networks,
 			AvailabilityZone: s.AvailabilityZone,
+			UserData:         userData,
 		},
 
 		KeyName: keyName,
From 0dd80c0eca33164dbbd7749533f3a2318e2675eb Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 10:17:42 -0400
Subject: [PATCH 336/956] config file doesn't need to exist if set [GH-2225]

---
 main.go | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/main.go b/main.go
index d00336283..aab28c141 100644
--- a/main.go
+++ b/main.go
@@ -217,12 +217,10 @@ func loadConfig() (*config, error) {
 		return nil, err
 	}
 
-	mustExist := true
 	configFilePath := os.Getenv("PACKER_CONFIG")
 	if configFilePath == "" {
 		var err error
 		configFilePath, err = configFile()
-		mustExist = false
 
 		if err != nil {
 			log.Printf("Error detecting default config file path: %s", err)
@@ -240,11 +238,7 @@ func loadConfig() (*config, error) {
 			return nil, err
 		}
 
-		if mustExist {
-			return nil, err
-		}
-
-		log.Println("File doesn't exist, but doesn't need to. Ignoring.")
+		log.Printf("[WARN] Config file doesn't exist: %s", configFilePath)
 		return &config, nil
 	}
 	defer f.Close()
From 1e853f9f1fcf36c54114db39039a50df7d4271c2 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 10:20:54 -0400
Subject: [PATCH 337/956] common: revert some changes from #2121 for Windows

---
 common/config.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/common/config.go b/common/config.go
index ebbfa6d8c..e6c6477b5 100644
--- a/common/config.go
+++ b/common/config.go
@@ -99,6 +99,14 @@ func DownloadableURL(original string) (string, error) {
 	// Make sure it is lowercased
 	url.Scheme = strings.ToLower(url.Scheme)
 
+	// This is to work around issue #5927. This can safely be removed once
+	// we distribute with a version of Go that fixes that bug.
+	//
+	// See: https://code.google.com/p/go/issues/detail?id=5927
+	if url.Path != "" && url.Path[0] != '/' {
+		url.Path = "/" + url.Path
+	}
+
 	// Verify that the scheme is something we support in our common downloader.
 	supported := []string{"file", "http", "https"}
 	found := false
From 1999c83a0c00ec7e1ad6a06ce76b544ab2e7897f Mon Sep 17 00:00:00 2001
From: Emil Hessman
Date: Sat, 13 Jun 2015 18:58:13 +0200
Subject: [PATCH 338/956] post-processor/atlas: adjust test for cross-platform
 filepath separator

Make TestLongestCommonPrefix cross-platform friendly by defining the
test cases with filepath.Separator.

Fixes test failure on Windows.
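
For illustration, a separator-aware case reads the same on every platform
(a sketch, not part of the diff):

    sep := string(filepath.Separator) // "/" on Unix, "\" on Windows
    input := []string{"foo" + sep, "foo" + sep + "bar"}
    // the prefix helper under test should return "foo" + sep on both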
---
 post-processor/atlas/util_test.go | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/post-processor/atlas/util_test.go b/post-processor/atlas/util_test.go
index b6b9da3d9..d0bed0a5d 100644
--- a/post-processor/atlas/util_test.go
+++ b/post-processor/atlas/util_test.go
@@ -1,10 +1,12 @@
 package atlas
 
 import (
+	"path/filepath"
 	"testing"
 )
 
 func TestLongestCommonPrefix(t *testing.T) {
+	sep := string(filepath.Separator)
 	cases := []struct {
 		Input  []string
 		Output string
@@ -18,12 +20,12 @@ func TestLongestCommonPrefix(t *testing.T) {
 			"",
 		},
 		{
-			[]string{"foo/", "foo/bar"},
-			"foo/",
+			[]string{"foo" + sep, "foo" + sep + "bar"},
+			"foo" + sep,
 		},
 		{
-			[]string{"/foo/", "/bar"},
-			"/",
+			[]string{sep + "foo" + sep, sep + "bar"},
+			sep,
 		},
 	}
From 1bcb52a093e79e66ecc1370f6567757c7912bab0 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 13:56:09 -0400
Subject: [PATCH 339/956] command/fix: validate resulting template [GH-2075]

---
 command/fix.go                              | 29 ++++
 command/fix_test.go                         | 58 +++++++++++++++++++
 .../test-fixtures/fix-invalid/template.json |  3 +
 command/test-fixtures/fix/template.json     |  7 +++
 4 files changed, 97 insertions(+)
 create mode 100644 command/fix_test.go
 create mode 100644 command/test-fixtures/fix-invalid/template.json
 create mode 100644 command/test-fixtures/fix/template.json

diff --git a/command/fix.go b/command/fix.go
index e908dc52e..2d9bcce37 100644
--- a/command/fix.go
+++ b/command/fix.go
@@ -9,6 +9,7 @@ import (
 	"strings"
 
 	"github.com/mitchellh/packer/fix"
+	"github.com/mitchellh/packer/template"
 )
 
 type FixCommand struct {
@@ -16,7 +17,9 @@ type FixCommand struct {
 }
 
 func (c *FixCommand) Run(args []string) int {
+	var flagValidate bool
 	flags := c.Meta.FlagSet("fix", FlagSetNone)
+	flags.BoolVar(&flagValidate, "validate", true, "")
 	flags.Usage = func() { c.Ui.Say(c.Help()) }
 	if err := flags.Parse(args); err != nil {
 		return 1
@@ -80,6 +83,28 @@ func (c *FixCommand) Run(args []string) int {
 	result = strings.Replace(result, `\u003c`, "<", -1)
 	result = strings.Replace(result, `\u003e`, ">", -1)
 	c.Ui.Say(result)
+
+	if flagValidate {
+		// Attempt to parse and validate the template
+		tpl, err := template.Parse(strings.NewReader(result))
+		if err != nil {
+			c.Ui.Error(fmt.Sprintf(
+				"Error! Fixed template fails to parse: %s\n\n"+
+					"This is usually caused by an error in the input template.\n"+
+					"Please fix the error and try again.",
+				err))
+			return 1
+		}
+		if err := tpl.Validate(); err != nil {
+			c.Ui.Error(fmt.Sprintf(
+				"Error! Fixed template failed to validate: %s\n\n"+
+					"This is usually caused by an error in the input template.\n"+
+					"Please fix the error and try again.",
+				err))
+			return 1
+		}
+	}
+
 	return 0
 }
 
@@ -102,6 +127,10 @@ Fixes that are run:
   pp-vagrant-override  Replaces old-style provider overrides for the Vagrant
                        post-processor to new-style as of Packer 0.5.0.
   virtualbox-rename    Updates "virtualbox" builders to "virtualbox-iso"
+
+Options:
+
+  -validate=true       If true (default), validates the fixed template.
 `
 
 	return strings.TrimSpace(helpText)
 }
diff --git a/command/fix_test.go b/command/fix_test.go
new file mode 100644
index 000000000..1bf6f1900
--- /dev/null
+++ b/command/fix_test.go
@@ -0,0 +1,58 @@
+package command
+
+import (
+	"path/filepath"
+	"testing"
+)
+
+func TestFix_noArgs(t *testing.T) {
+	c := &FixCommand{Meta: testMeta(t)}
+	code := c.Run(nil)
+	if code != 1 {
+		t.Fatalf("bad: %#v", code)
+	}
+}
+
+func TestFix_multiArgs(t *testing.T) {
+	c := &FixCommand{Meta: testMeta(t)}
+	code := c.Run([]string{"one", "two"})
+	if code != 1 {
+		t.Fatalf("bad: %#v", code)
+	}
+}
+
+func TestFix(t *testing.T) {
+	c := &FixCommand{
+		Meta: testMeta(t),
+	}
+
+	args := []string{filepath.Join(testFixture("fix"), "template.json")}
+	if code := c.Run(args); code != 0 {
+		fatalCommand(t, c.Meta)
+	}
+}
+
+func TestFix_invalidTemplate(t *testing.T) {
+	c := &FixCommand{
+		Meta: testMeta(t),
+	}
+
+	args := []string{filepath.Join(testFixture("fix-invalid"), "template.json")}
+	if code := c.Run(args); code != 1 {
+		fatalCommand(t, c.Meta)
+	}
+}
+
+func TestFix_invalidTemplateDisableValidation(t *testing.T) {
+	c := &FixCommand{
+		Meta: testMeta(t),
+	}
+
+	args := []string{
+		"-validate=false",
+		filepath.Join(testFixture("fix-invalid"), "template.json"),
+	}
+	if code := c.Run(args); code != 0 {
+		fatalCommand(t, c.Meta)
+	}
+}
diff --git a/command/test-fixtures/fix-invalid/template.json b/command/test-fixtures/fix-invalid/template.json
new file mode 100644
index 000000000..ea50c5dcf
--- /dev/null
+++ b/command/test-fixtures/fix-invalid/template.json
@@ -0,0 +1,3 @@
+{
+  "hello": "world"
+}
diff --git a/command/test-fixtures/fix/template.json b/command/test-fixtures/fix/template.json
new file mode 100644
index 000000000..63b0f9037
--- /dev/null
+++ b/command/test-fixtures/fix/template.json
@@ -0,0 +1,7 @@
+{
+  "builders": [{"type": "dummy"}],
+
+  "push": {
+    "name": "foo/bar"
+  }
+}
From f1cef0baaee6a572831e9f6e61a983f32126ed2a Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 14:07:43 -0400
Subject: [PATCH 340/956] builder/null: fix config parsing

---
 builder/null/config.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/builder/null/config.go b/builder/null/config.go
index 9ccc32282..aa3f15120 100644
--- a/builder/null/config.go
+++ b/builder/null/config.go
@@ -2,6 +2,7 @@ package null
 
 import (
 	"fmt"
+
 	"github.com/mitchellh/packer/common"
 	"github.com/mitchellh/packer/helper/config"
 	"github.com/mitchellh/packer/packer"
@@ -19,9 +20,9 @@ type Config struct {
 }
 
 func NewConfig(raws ...interface{}) (*Config, []string, error) {
-	c := new(Config)
+	var c Config
 
-	err := config.Decode(c, &config.DecodeOpts{
+	err := config.Decode(&c, &config.DecodeOpts{
 		Interpolate: true,
 		InterpolateFilter: &interpolate.RenderFilter{
 			Exclude: []string{
@@ -62,5 +63,5 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 	return nil, nil, errs
 }
 
-	return c, nil, nil
+	return &c, nil, nil
 }
From c549fce85e9e09be6c072bfdbc44a430219663b1 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 14:11:20 -0400
Subject: [PATCH 341/956] provisioner/shell: escape single quotes [GH-2067]

---
 provisioner/shell/provisioner.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go
index 212ec2abf..baedd645a 100644
--- a/provisioner/shell/provisioner.go
+++ b/provisioner/shell/provisioner.go
@@ -145,6 +145,9 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
 			errs = 
packer.MultiErrorAppend(errs, fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) } else { + // Replace single quotes so they parse + vs[1] = strings.Replace(vs[1], "'", `'"'"'`, -1) + // Single quote env var values p.config.Vars[idx] = fmt.Sprintf("%s='%s'", vs[0], vs[1]) } From facbb6577d060cd841c9c761d5e7320539640743 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 16:19:25 -0400 Subject: [PATCH 342/956] template: allow _ prefix to root level keys for comments [GH-2066] --- template/parse.go | 9 +++++++-- template/parse_test.go | 13 +++++++++++++ template/test-fixtures/parse-comment.json | 4 ++++ .../docs/templates/introduction.html.markdown | 16 ++++++++++++++++ 4 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 template/test-fixtures/parse-comment.json diff --git a/template/parse.go b/template/parse.go index dbb29569d..f64057db2 100644 --- a/template/parse.go +++ b/template/parse.go @@ -291,11 +291,16 @@ func Parse(r io.Reader) (*Template, error) { if len(md.Unused) > 0 { sort.Strings(md.Unused) for _, unused := range md.Unused { + // Ignore keys starting with '_' as comments + if unused[0] == '_' { + continue + } + err = multierror.Append(err, fmt.Errorf( "Unknown root level key in template: '%s'", unused)) } - - // Return early for these errors + } + if err != nil { return nil, err } diff --git a/template/parse_test.go b/template/parse_test.go index 9abca2f77..5285e27d3 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -303,6 +303,19 @@ func TestParse(t *testing.T) { }, false, }, + + { + "parse-comment.json", + &Template{ + Builders: map[string]*Builder{ + "something": &Builder{ + Name: "something", + Type: "something", + }, + }, + }, + false, + }, } for _, tc := range cases { diff --git a/template/test-fixtures/parse-comment.json b/template/test-fixtures/parse-comment.json new file mode 100644 index 000000000..d3bb95a3b --- /dev/null +++ b/template/test-fixtures/parse-comment.json @@ -0,0 +1,4 @@ +{ + "_info": "foo", + "builders": [{"type": "something"}] +} diff --git a/website/source/docs/templates/introduction.html.markdown b/website/source/docs/templates/introduction.html.markdown index a8be4f592..3dc363916 100644 --- a/website/source/docs/templates/introduction.html.markdown +++ b/website/source/docs/templates/introduction.html.markdown @@ -58,6 +58,22 @@ Along with each key, it is noted whether it is required or not. For more information on how to define and use user variables, read the sub-section on [user variables in templates](/docs/templates/user-variables.html). +## Comments + +JSON doesn't support comments and Packer reports unknown keys as validation +errors. If you'd like to comment your template, you can prefix a _root level_ +key with an underscore. Example: + +```javascript +{ + "_comment": "This is a comment", + "builders": [{}] +} +``` + +**Important:** Only _root level_ keys can be underscore prefixed. Keys within +builders, provisioners, etc. will still result in validation errors. + ## Example Template Below is an example of a basic template that is nearly fully functional. 
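(As an aside before the example: the comment rule added in parse.go above amounts to a one-character prefix test on each leftover root-level key. A self-contained sketch of the same filtering, with illustrative names rather than the parser's internals:)

```go
package main

import (
	"fmt"
	"strings"
)

// commentKeys splits unused root-level keys into comments (leading "_")
// and genuinely unknown keys, mirroring the parser's rule above.
func commentKeys(unused []string) (comments, unknown []string) {
	for _, key := range unused {
		if strings.HasPrefix(key, "_") {
			comments = append(comments, key)
			continue
		}
		unknown = append(unknown, key)
	}
	return
}

func main() {
	c, u := commentKeys([]string{"_info", "bulders"})
	fmt.Println(c) // [_info] -> silently ignored
	fmt.Println(u) // [bulders] -> would be reported as an unknown key
}
```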
It is just From 7eff6b117da1047004dd33fb209f7cf54422509b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 16:24:47 -0400 Subject: [PATCH 343/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b96154c2b..c933040c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ BACKWARDS INCOMPATIBILITIES: + * core: SSH connection will no longer request a PTY by default. This + can be enabled per builder. * builder/digitalocean: no longer supports the v1 API which has been deprecated for some time. Most configurations should continue to work as long as you use the `api_token` field for auth. From c3f54ba5a9650a39f89e1471154b1292db34a9c0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 16:39:12 -0400 Subject: [PATCH 344/956] fix: virtualbox rename fixes overrides [GH-1828] --- fix/fixer_virtualbox_rename.go | 40 ++++++++++++++++++++++++--- fix/fixer_virtualbox_rename_test.go | 42 +++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/fix/fixer_virtualbox_rename.go b/fix/fixer_virtualbox_rename.go index 292bf0bf1..34187ac2c 100644 --- a/fix/fixer_virtualbox_rename.go +++ b/fix/fixer_virtualbox_rename.go @@ -8,14 +8,14 @@ import ( type FixerVirtualBoxRename struct{} func (FixerVirtualBoxRename) Fix(input map[string]interface{}) (map[string]interface{}, error) { - // The type we'll decode into; we only care about builders type template struct { - Builders []map[string]interface{} + Builders []map[string]interface{} + Provisioners []interface{} } // Decode the input into our structure, if we can var tpl template - if err := mapstructure.Decode(input, &tpl); err != nil { + if err := mapstructure.WeakDecode(input, &tpl); err != nil { return nil, err } @@ -37,7 +37,39 @@ func (FixerVirtualBoxRename) Fix(input map[string]interface{}) (map[string]inter builder["type"] = "virtualbox-iso" } - input["builders"] = tpl.Builders + for i, raw := range tpl.Provisioners { + var m map[string]interface{} + if err := mapstructure.WeakDecode(raw, &m); err != nil { + // Ignore errors, could be a non-map + continue + } + + raw, ok := m["override"] + if !ok { + continue + } + + var override map[string]interface{} + if err := mapstructure.WeakDecode(raw, &override); err != nil { + return nil, err + } + + if raw, ok := override["virtualbox"]; ok { + override["virtualbox-iso"] = raw + delete(override, "virtualbox") + + // Set the change + m["override"] = override + tpl.Provisioners[i] = m + } + } + + if len(tpl.Builders) > 0 { + input["builders"] = tpl.Builders + } + if len(tpl.Provisioners) > 0 { + input["provisioners"] = tpl.Provisioners + } return input, nil } diff --git a/fix/fixer_virtualbox_rename_test.go b/fix/fixer_virtualbox_rename_test.go index 78b7bccf2..355e5276a 100644 --- a/fix/fixer_virtualbox_rename_test.go +++ b/fix/fixer_virtualbox_rename_test.go @@ -46,3 +46,45 @@ func TestFixerVirtualBoxRename_Fix(t *testing.T) { } } } + +func TestFixerVirtualBoxRenameFix_provisionerOverride(t *testing.T) { + cases := []struct { + Input map[string]interface{} + Expected map[string]interface{} + }{ + { + Input: map[string]interface{}{ + "provisioners": []interface{}{ + map[string]interface{}{ + "override": map[string]interface{}{ + "virtualbox": map[string]interface{}{}, + }, + }, + }, + }, + + Expected: map[string]interface{}{ + "provisioners": []interface{}{ + map[string]interface{}{ + "override": map[string]interface{}{ + "virtualbox-iso": 
map[string]interface{}{},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		var f FixerVirtualBoxRename
+
+		output, err := f.Fix(tc.Input)
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		if !reflect.DeepEqual(output, tc.Expected) {
+			t.Fatalf("unexpected:\n\n%#v\nexpected:\n\n%#v\n", output, tc.Expected)
+		}
+	}
+}

From f1b3c8a7ae49c4acdbb74c862956eb22538cfd17 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 16:48:35 -0400
Subject: [PATCH 345/956] template/interpolate: build_name and build_type functions

---
 template/interpolate/funcs.go      | 22 +++++++++++
 template/interpolate/funcs_test.go | 50 ++++++++++++++++++++++++++++++
 template/interpolate/i.go          | 15 ++++++---
 3 files changed, 83 insertions(+), 4 deletions(-)

diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go
index 6092707b8..e5d01b455 100644
--- a/template/interpolate/funcs.go
+++ b/template/interpolate/funcs.go
@@ -24,6 +24,8 @@ func init() {
 
 // Funcs are the interpolation funcs that are available within interpolations.
 var FuncGens = map[string]FuncGenerator{
+	"build_name": funcGenBuildName,
+	"build_type": funcGenBuildType,
 	"env":        funcGenEnv,
 	"isotime":    funcGenIsotime,
 	"pwd":        funcGenPwd,
@@ -56,6 +58,26 @@ func Funcs(ctx *Context) template.FuncMap {
 	return template.FuncMap(result)
}
 
+func funcGenBuildName(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.BuildName == "" {
+			return "", errors.New("build_name not available")
+		}
+
+		return ctx.BuildName, nil
+	}
+}
+
+func funcGenBuildType(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.BuildType == "" {
+			return "", errors.New("build_type not available")
+		}
+
+		return ctx.BuildType, nil
+	}
+}
+
 func funcGenEnv(ctx *Context) interface{} {
 	return func(k string) (string, error) {
 		if !ctx.EnableEnv {
diff --git a/template/interpolate/funcs_test.go b/template/interpolate/funcs_test.go
index ff877f13e..065942c93 100644
--- a/template/interpolate/funcs_test.go
+++ b/template/interpolate/funcs_test.go
@@ -8,6 +8,56 @@ import (
 	"time"
 )
 
+func TestFuncBuildName(t *testing.T) {
+	cases := []struct {
+		Input  string
+		Output string
+	}{
+		{
+			`{{build_name}}`,
+			"foo",
+		},
+	}
+
+	ctx := &Context{BuildName: "foo"}
+	for _, tc := range cases {
+		i := &I{Value: tc.Input}
+		result, err := i.Render(ctx)
+		if err != nil {
+			t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err)
+		}
+
+		if result != tc.Output {
+			t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result)
+		}
+	}
+}
+
+func TestFuncBuildType(t *testing.T) {
+	cases := []struct {
+		Input  string
+		Output string
+	}{
+		{
+			`{{build_type}}`,
+			"foo",
+		},
+	}
+
+	ctx := &Context{BuildType: "foo"}
+	for _, tc := range cases {
+		i := &I{Value: tc.Input}
+		result, err := i.Render(ctx)
+		if err != nil {
+			t.Fatalf("Input: %s\n\nerr: %s", tc.Input, err)
+		}
+
+		if result != tc.Output {
+			t.Fatalf("Input: %s\n\nGot: %s", tc.Input, result)
+		}
+	}
+}
+
 func TestFuncEnv(t *testing.T) {
 	cases := []struct {
 		Input  string
diff --git a/template/interpolate/i.go b/template/interpolate/i.go
index d5f7c8413..02f56197a 100644
--- a/template/interpolate/i.go
+++ b/template/interpolate/i.go
@@ -14,16 +14,23 @@ type Context struct {
 	// Funcs are extra functions available in the template
 	Funcs map[string]interface{}
 
-	// TemplatePath is the path to the template that this is being
-	// rendered within.
-	TemplatePath string
-
 	// UserVariables is the mapping of user variables that the
 	// "user" function reads from.
UserVariables map[string]string // EnableEnv enables the env function EnableEnv bool + + // All the fields below are used for built-in functions. + // + // BuildName and BuildType are the name and type, respectively, + // of the builder being used. + // + // TemplatePath is the path to the template that this is being + // rendered within. + BuildName string + BuildType string + TemplatePath string } // Render is shorthand for constructing an I and calling Render. From 472b060394622a1b9c25c0570718a0cba3d691a7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 16:51:29 -0400 Subject: [PATCH 346/956] packer: build_name and build_type work + tests --- helper/config/decode.go | 4 ++ packer/core_test.go | 58 +++++++++++++++++++ .../test-fixtures/build-var-build-name.json | 6 ++ .../test-fixtures/build-var-build-type.json | 6 ++ 4 files changed, 74 insertions(+) create mode 100644 packer/test-fixtures/build-var-build-name.json create mode 100644 packer/test-fixtures/build-var-build-type.json diff --git a/helper/config/decode.go b/helper/config/decode.go index 0148ad27c..ccb71ac7f 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -104,6 +104,8 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { // detecting things like user variables from the raw configuration params. func DetectContext(raws ...interface{}) (*interpolate.Context, error) { var s struct { + BuildName string `mapstructure:"packer_build_name"` + BuildType string `mapstructure:"packer_builder_type"` TemplatePath string `mapstructure:"packer_template_path"` Vars map[string]string `mapstructure:"packer_user_variables"` } @@ -115,6 +117,8 @@ func DetectContext(raws ...interface{}) (*interpolate.Context, error) { } return &interpolate.Context{ + BuildName: s.BuildName, + BuildType: s.BuildType, TemplatePath: s.TemplatePath, UserVariables: s.Vars, }, nil diff --git a/packer/core_test.go b/packer/core_test.go index f11242d0c..cc958356e 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -142,6 +142,64 @@ func TestCoreBuild_env(t *testing.T) { } } +func TestCoreBuild_buildNameVar(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-var-build-name.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + // Interpolate the config + var result map[string]interface{} + err = configHelper.Decode(&result, nil, b.PrepareConfig...) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["value"] != "test" { + t.Fatalf("bad: %#v", result) + } +} + +func TestCoreBuild_buildTypeVar(t *testing.T) { + config := TestCoreConfig(t) + testCoreTemplate(t, config, fixtureDir("build-var-build-type.json")) + b := TestBuilder(t, config, "test") + core := TestCore(t, config) + + b.ArtifactId = "hello" + + build, err := core.Build("test") + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := build.Prepare(); err != nil { + t.Fatalf("err: %s", err) + } + + // Interpolate the config + var result map[string]interface{} + err = configHelper.Decode(&result, nil, b.PrepareConfig...) 
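+	// (b.PrepareConfig holds the raw configuration the test builder received
+	// in Prepare; decoding it exposes the interpolated "value" key checked
+	// below.)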
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if result["value"] != "test" {
+		t.Fatalf("bad: %#v", result)
+	}
+}
+
 func TestCoreBuild_nonExist(t *testing.T) {
 	config := TestCoreConfig(t)
 	testCoreTemplate(t, config, fixtureDir("build-basic.json"))
diff --git a/packer/test-fixtures/build-var-build-name.json b/packer/test-fixtures/build-var-build-name.json
new file mode 100644
index 000000000..da43d9ad9
--- /dev/null
+++ b/packer/test-fixtures/build-var-build-name.json
@@ -0,0 +1,6 @@
+{
+    "builders": [{
+        "type": "test",
+        "value": "{{build_name}}"
+    }]
+}
diff --git a/packer/test-fixtures/build-var-build-type.json b/packer/test-fixtures/build-var-build-type.json
new file mode 100644
index 000000000..3d925e406
--- /dev/null
+++ b/packer/test-fixtures/build-var-build-type.json
@@ -0,0 +1,6 @@
+{
+    "builders": [{
+        "type": "test",
+        "value": "{{build_type}}"
+    }]
+}

From 3976a34d29c125f4c92c8a9d31d469446dcce75f Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 16:58:37 -0400
Subject: [PATCH 347/956] builder/virtualbox: validate output dir in step, not in config

---
 builder/virtualbox/common/output_config.go      | 11 +--------
 .../virtualbox/common/output_config_test.go     | 22 +----------------
 builder/virtualbox/common/step_output_dir.go    | 11 ++++++++-
 .../virtualbox/common/step_output_dir_test.go   | 24 +++++++++++++++++++
 4 files changed, 36 insertions(+), 32 deletions(-)

diff --git a/builder/virtualbox/common/output_config.go b/builder/virtualbox/common/output_config.go
index f3427183c..7b5ddcd45 100644
--- a/builder/virtualbox/common/output_config.go
+++ b/builder/virtualbox/common/output_config.go
@@ -2,7 +2,6 @@ package common
 
 import (
 	"fmt"
-	"os"
 
 	"github.com/mitchellh/packer/common"
 	"github.com/mitchellh/packer/template/interpolate"
@@ -17,13 +16,5 @@ func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig
 		c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName)
 	}
 
-	var errs []error
-	if !pc.PackerForce {
-		if _, err := os.Stat(c.OutputDir); err == nil {
-			errs = append(errs, fmt.Errorf(
-				"Output directory '%s' already exists.
It must not exist.", c.OutputDir)) - } - } - - return errs + return nil } diff --git a/builder/virtualbox/common/output_config_test.go b/builder/virtualbox/common/output_config_test.go index 7fa039a16..a4d8e7999 100644 --- a/builder/virtualbox/common/output_config_test.go +++ b/builder/virtualbox/common/output_config_test.go @@ -39,27 +39,7 @@ func TestOutputConfigPrepare_exists(t *testing.T) { PackerForce: false, } errs := c.Prepare(testConfigTemplate(t), pc) - if len(errs) == 0 { - t.Fatal("should have errors") - } -} - -func TestOutputConfigPrepare_forceExists(t *testing.T) { - td, err := ioutil.TempDir("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(td) - - c := new(OutputConfig) - c.OutputDir = td - - pc := &common.PackerConfig{ - PackerBuildName: "foo", - PackerForce: true, - } - errs := c.Prepare(testConfigTemplate(t), pc) - if len(errs) > 0 { + if len(errs) != 0 { t.Fatal("should not have errors") } } diff --git a/builder/virtualbox/common/step_output_dir.go b/builder/virtualbox/common/step_output_dir.go index 209bbabe2..e01928b7a 100644 --- a/builder/virtualbox/common/step_output_dir.go +++ b/builder/virtualbox/common/step_output_dir.go @@ -22,7 +22,16 @@ type StepOutputDir struct { func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - if _, err := os.Stat(s.Path); err == nil && s.Force { + if _, err := os.Stat(s.Path); err == nil { + if !s.Force { + err := fmt.Errorf( + "Output directory exists: %s\n\n"+ + "Use the force flag to delete it prior to building.", + s.Path) + state.Put("error", err) + return multistep.ActionHalt + } + ui.Say("Deleting previous output directory...") os.RemoveAll(s.Path) } diff --git a/builder/virtualbox/common/step_output_dir_test.go b/builder/virtualbox/common/step_output_dir_test.go index be485c278..77d1f855f 100644 --- a/builder/virtualbox/common/step_output_dir_test.go +++ b/builder/virtualbox/common/step_output_dir_test.go @@ -45,6 +45,30 @@ func TestStepOutputDir(t *testing.T) { } } +func TestStepOutputDir_exists(t *testing.T) { + state := testState(t) + step := testStepOutputDir(t) + + // Make the dir + if err := os.MkdirAll(step.Path, 0755); err != nil { + t.Fatalf("bad: %s", err) + } + + // Test the run + if action := step.Run(state); action != multistep.ActionHalt { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); !ok { + t.Fatal("should have error") + } + + // Test the cleanup + step.Cleanup(state) + if _, err := os.Stat(step.Path); err != nil { + t.Fatalf("err: %s", err) + } +} + func TestStepOutputDir_cancelled(t *testing.T) { state := testState(t) step := testStepOutputDir(t) From d8518981317b1afea2885948f56606bb8687f990 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:00:14 -0400 Subject: [PATCH 348/956] builder/vmware: mirror virtualbox output dir changes --- builder/vmware/common/output_config.go | 11 +---- builder/vmware/common/output_config_test.go | 45 +-------------------- builder/vmware/iso/builder_test.go | 4 +- 3 files changed, 5 insertions(+), 55 deletions(-) diff --git a/builder/vmware/common/output_config.go b/builder/vmware/common/output_config.go index f3427183c..7b5ddcd45 100644 --- a/builder/vmware/common/output_config.go +++ b/builder/vmware/common/output_config.go @@ -2,7 +2,6 @@ package common import ( "fmt" - "os" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/template/interpolate" @@ -17,13 +16,5 @@ func (c *OutputConfig) Prepare(ctx 
*interpolate.Context, pc *common.PackerConfig c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName) } - var errs []error - if !pc.PackerForce { - if _, err := os.Stat(c.OutputDir); err == nil { - errs = append(errs, fmt.Errorf( - "Output directory '%s' already exists. It must not exist.", c.OutputDir)) - } - } - - return errs + return nil } diff --git a/builder/vmware/common/output_config_test.go b/builder/vmware/common/output_config_test.go index 7fa039a16..7de378d89 100644 --- a/builder/vmware/common/output_config_test.go +++ b/builder/vmware/common/output_config_test.go @@ -1,10 +1,9 @@ package common import ( - "github.com/mitchellh/packer/common" - "io/ioutil" - "os" "testing" + + "github.com/mitchellh/packer/common" ) func TestOutputConfigPrepare(t *testing.T) { @@ -23,43 +22,3 @@ func TestOutputConfigPrepare(t *testing.T) { t.Fatal("should have output dir") } } - -func TestOutputConfigPrepare_exists(t *testing.T) { - td, err := ioutil.TempDir("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(td) - - c := new(OutputConfig) - c.OutputDir = td - - pc := &common.PackerConfig{ - PackerBuildName: "foo", - PackerForce: false, - } - errs := c.Prepare(testConfigTemplate(t), pc) - if len(errs) == 0 { - t.Fatal("should have errors") - } -} - -func TestOutputConfigPrepare_forceExists(t *testing.T) { - td, err := ioutil.TempDir("", "packer") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(td) - - c := new(OutputConfig) - c.OutputDir = td - - pc := &common.PackerConfig{ - PackerBuildName: "foo", - PackerForce: true, - } - errs := c.Prepare(testConfigTemplate(t), pc) - if len(errs) > 0 { - t.Fatal("should not have errors") - } -} diff --git a/builder/vmware/iso/builder_test.go b/builder/vmware/iso/builder_test.go index 1749b396e..bf658938b 100644 --- a/builder/vmware/iso/builder_test.go +++ b/builder/vmware/iso/builder_test.go @@ -353,8 +353,8 @@ func TestBuilderPrepare_OutputDir(t *testing.T) { if len(warns) > 0 { t.Fatalf("bad: %#v", warns) } - if err == nil { - t.Fatal("should have error") + if err != nil { + t.Fatalf("err: %s", err) } // Test with a good one From 511013dbe4b845dcb2898f37852a2e260162c9f9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:05:38 -0400 Subject: [PATCH 349/956] fix go vet warning --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index aab28c141..7f5cb7bef 100644 --- a/main.go +++ b/main.go @@ -238,7 +238,7 @@ func loadConfig() (*config, error) { return nil, err } - log.Println("[WARN] Config file doesn't exist: %s", configFilePath) + log.Printf("[WARN] Config file doesn't exist: %s", configFilePath) return &config, nil } defer f.Close() From 4b3ed5d7e2f61aaa1c02951aad97d5ec33c46005 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:42:38 -0400 Subject: [PATCH 350/956] helper/communicator --- helper/communicator/config.go | 38 ++++++ helper/communicator/step_connect.go | 44 +++++++ helper/communicator/step_connect_ssh.go | 153 ++++++++++++++++++++++++ 3 files changed, 235 insertions(+) create mode 100644 helper/communicator/config.go create mode 100644 helper/communicator/step_connect.go create mode 100644 helper/communicator/step_connect_ssh.go diff --git a/helper/communicator/config.go b/helper/communicator/config.go new file mode 100644 index 000000000..2500a7d2d --- /dev/null +++ b/helper/communicator/config.go @@ -0,0 +1,38 @@ +package communicator + +import ( + "errors" + "time" + + 
"github.com/mitchellh/packer/template/interpolate" +) + +// Config is the common configuration that communicators allow within +// a builder. +type Config struct { + SSHHost string `mapstructure:"ssh_host"` + SSHPort int `mapstructure:"ssh_port"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPassword string `mapstructure:"ssh_password"` + SSHPrivateKey string `mapstructure:"ssh_private_key_file"` + SSHPty bool `mapstructure:"ssh_pty"` + SSHTimeout time.Duration `mapstructure:"ssh_timeout"` +} + +func (c *Config) Prepare(ctx *interpolate.Context) []error { + if c.SSHPort == 0 { + c.SSHPort = 22 + } + + if c.SSHTimeout == 0 { + c.SSHTimeout = 5 * time.Minute + } + + // Validation + var errs []error + if c.SSHUsername == "" { + errs = append(errs, errors.New("An ssh_username must be specified")) + } + + return errs +} diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go new file mode 100644 index 000000000..77feebfb9 --- /dev/null +++ b/helper/communicator/step_connect.go @@ -0,0 +1,44 @@ +package communicator + +import ( + "github.com/mitchellh/multistep" + gossh "golang.org/x/crypto/ssh" +) + +// StepConnect is a multistep Step implementation that connects to +// the proper communicator and stores it in the "communicator" key in the +// state bag. +type StepConnect struct { + // Config is the communicator config struct + Config *Config + + // The fields below are callbacks to assist with connecting to SSH. + // + // SSHAddress should return the default host to connect to for SSH. + // This is only called if ssh_host isn't specified in the config. + // + // SSHConfig should return the default configuration for + // connecting via SSH. + SSHAddress func(multistep.StateBag) (string, error) + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + + substep multistep.Step +} + +func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { + // Eventually we might switch between multiple of these depending + // on the communicator type. + s.substep = &StepConnectSSH{ + Config: s.Config, + SSHAddress: s.SSHAddress, + SSHConfig: s.SSHConfig, + } + + return s.substep.Run(state) +} + +func (s *StepConnect) Cleanup(state multistep.StateBag) { + if s.substep != nil { + s.substep.Cleanup(state) + } +} diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go new file mode 100644 index 000000000..9be653c01 --- /dev/null +++ b/helper/communicator/step_connect_ssh.go @@ -0,0 +1,153 @@ +package communicator + +import ( + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/communicator/ssh" + "github.com/mitchellh/packer/packer" + gossh "golang.org/x/crypto/ssh" +) + +// StepConnectSSH is a step that only connects to SSH. +// +// In general, you should use StepConnect. 
+type StepConnectSSH struct { + // All the fields below are documented on StepConnect + Config *Config + SSHAddress func(multistep.StateBag) (string, error) + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) +} + +func (s *StepConnectSSH) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + + var comm packer.Communicator + var err error + + cancel := make(chan struct{}) + waitDone := make(chan bool, 1) + go func() { + ui.Say("Waiting for SSH to become available...") + comm, err = s.waitForSSH(state, cancel) + waitDone <- true + }() + + log.Printf("[INFO] Waiting for SSH, up to timeout: %s", s.Config.SSHTimeout) + timeout := time.After(s.Config.SSHTimeout) +WaitLoop: + for { + // Wait for either SSH to become available, a timeout to occur, + // or an interrupt to come through. + select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for SSH: %s", err)) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say("Connected to SSH!") + state.Put("communicator", comm) + break WaitLoop + case <-timeout: + err := fmt.Errorf("Timeout waiting for SSH.") + state.Put("error", err) + ui.Error(err.Error()) + close(cancel) + return multistep.ActionHalt + case <-time.After(1 * time.Second): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + // The step sequence was cancelled, so cancel waiting for SSH + // and just start the halting process. + close(cancel) + log.Println("[WARN] Interrupt detected, quitting waiting for SSH.") + return multistep.ActionHalt + } + } + } + + return multistep.ActionContinue +} + +func (s *StepConnectSSH) Cleanup(multistep.StateBag) { +} + +func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan struct{}) (packer.Communicator, error) { + handshakeAttempts := 0 + + var comm packer.Communicator + first := true + for { + // Don't check for cancel or wait on first iteration + if !first { + select { + case <-cancel: + log.Println("[DEBUG] SSH wait cancelled. Exiting loop.") + return nil, errors.New("SSH wait cancelled") + case <-time.After(5 * time.Second): + } + } + first = false + + // First we request the TCP connection information + address, err := s.SSHAddress(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH address: %s", err) + continue + } + + // Retrieve the SSH configuration + sshConfig, err := s.SSHConfig(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH config: %s", err) + continue + } + + // Attempt to connect to SSH port + connFunc := ssh.ConnectFunc("tcp", address) + nc, err := connFunc() + if err != nil { + log.Printf("[DEBUG] TCP connection to SSH ip/port failed: %s", err) + continue + } + nc.Close() + + // Then we attempt to connect via SSH + config := &ssh.Config{ + Connection: connFunc, + SSHConfig: sshConfig, + Pty: s.Config.SSHPty, + } + + log.Println("[INFO] Attempting SSH connection...") + comm, err = ssh.New(address, config) + if err != nil { + log.Printf("[DEBUG] SSH handshake err: %s", err) + + // Only count this as an attempt if we were able to attempt + // to authenticate. Note this is very brittle since it depends + // on the string of the error... but I don't see any other way. + if strings.Contains(err.Error(), "authenticate") { + log.Printf( + "[DEBUG] Detected authentication error. 
Increasing handshake attempts.") + handshakeAttempts += 1 + } + + if handshakeAttempts < 10 { + // Try to connect via SSH a handful of times + continue + } + + return nil, err + } + + break + } + + return comm, nil +} From d545431f9bc735f5200489917512da5230ec7418 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:42:43 -0400 Subject: [PATCH 351/956] builder/null: adopt helper/communicator --- builder/null/builder.go | 17 +++++++++++------ builder/null/config.go | 30 +++++++++++------------------- builder/null/config_test.go | 10 +++++----- 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/builder/null/builder.go b/builder/null/builder.go index 7ca1b57fd..925075ee0 100644 --- a/builder/null/builder.go +++ b/builder/null/builder.go @@ -1,11 +1,12 @@ package null import ( + "log" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" - "log" - "time" ) const BuilderId = "fnoeding.null" @@ -27,10 +28,14 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { steps := []multistep.Step{ - &common.StepConnectSSH{ - SSHAddress: SSHAddress(b.config.Host, b.config.Port), - SSHConfig: SSHConfig(b.config.SSHUsername, b.config.SSHPassword, b.config.SSHPrivateKeyFile), - SSHWaitTimeout: 1 * time.Minute, + &communicator.StepConnect{ + Config: &b.config.CommConfig, + SSHAddress: SSHAddress( + b.config.CommConfig.SSHHost, b.config.CommConfig.SSHPort), + SSHConfig: SSHConfig( + b.config.CommConfig.SSHUsername, + b.config.CommConfig.SSHPassword, + b.config.CommConfig.SSHPrivateKey), }, &common.StepProvision{}, } diff --git a/builder/null/config.go b/builder/null/config.go index aa3f15120..a6a12332e 100644 --- a/builder/null/config.go +++ b/builder/null/config.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -12,49 +13,40 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` - Host string `mapstructure:"host"` - Port int `mapstructure:"port"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPassword string `mapstructure:"ssh_password"` - SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"` + CommConfig communicator.Config `mapstructure:",squash"` } func NewConfig(raws ...interface{}) (*Config, []string, error) { var c Config err := config.Decode(&c, &config.DecodeOpts{ - Interpolate: true, - InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - "run_command", - }, - }, + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{}, }, raws...) if err != nil { return nil, nil, err } - if c.Port == 0 { - c.Port = 22 - } - var errs *packer.MultiError - if c.Host == "" { + if es := c.CommConfig.Prepare(nil); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) 
+ } + if c.CommConfig.SSHHost == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("host must be specified")) } - if c.SSHUsername == "" { + if c.CommConfig.SSHUsername == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("ssh_username must be specified")) } - if c.SSHPassword == "" && c.SSHPrivateKeyFile == "" { + if c.CommConfig.SSHPassword == "" && c.CommConfig.SSHPrivateKey == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("one of ssh_password and ssh_private_key_file must be specified")) } - if c.SSHPassword != "" && c.SSHPrivateKeyFile != "" { + if c.CommConfig.SSHPassword != "" && c.CommConfig.SSHPrivateKey != "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("only one of ssh_password and ssh_private_key_file must be specified")) } diff --git a/builder/null/config_test.go b/builder/null/config_test.go index dd574de35..12123378d 100644 --- a/builder/null/config_test.go +++ b/builder/null/config_test.go @@ -6,7 +6,7 @@ import ( func testConfig() map[string]interface{} { return map[string]interface{}{ - "host": "foo", + "ssh_host": "foo", "ssh_username": "bar", "ssh_password": "baz", } @@ -48,8 +48,8 @@ func TestConfigPrepare_port(t *testing.T) { // default port should be 22 delete(raw, "port") c, warns, errs := NewConfig(raw) - if c.Port != 22 { - t.Fatalf("bad: port should default to 22, not %d", c.Port) + if c.CommConfig.SSHPort != 22 { + t.Fatalf("bad: port should default to 22, not %d", c.CommConfig.SSHPort) } testConfigOk(t, warns, errs) } @@ -58,12 +58,12 @@ func TestConfigPrepare_host(t *testing.T) { raw := testConfig() // No host - delete(raw, "host") + delete(raw, "ssh_host") _, warns, errs := NewConfig(raw) testConfigErr(t, warns, errs) // Good host - raw["host"] = "good" + raw["ssh_host"] = "good" _, warns, errs = NewConfig(raw) testConfigOk(t, warns, errs) } From 4b4fe2280d291a19c356bbc793ecf6760983748b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:50:45 -0400 Subject: [PATCH 352/956] helper/communicator: can be disabled --- helper/communicator/config.go | 5 +++ helper/communicator/config_test.go | 28 +++++++++++++++++ helper/communicator/step_connect.go | 28 +++++++++++++---- helper/communicator/step_connect_test.go | 39 ++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 6 deletions(-) create mode 100644 helper/communicator/config_test.go create mode 100644 helper/communicator/step_connect_test.go diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 2500a7d2d..28d1d3a43 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -10,6 +10,7 @@ import ( // Config is the common configuration that communicators allow within // a builder. 
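+// An empty Type defaults to "ssh" in Prepare below; a template can set the
+// "communicator" key to "none" to have StepConnect skip connecting entirely
+// (see step_connect.go in this same commit).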
 type Config struct {
+	Type        string `mapstructure:"communicator"`
 	SSHHost     string `mapstructure:"ssh_host"`
 	SSHPort     int    `mapstructure:"ssh_port"`
 	SSHUsername string `mapstructure:"ssh_username"`
@@ -20,6 +21,10 @@ type Config struct {
 }
 
 func (c *Config) Prepare(ctx *interpolate.Context) []error {
+	if c.Type == "" {
+		c.Type = "ssh"
+	}
+
 	if c.SSHPort == 0 {
 		c.SSHPort = 22
 	}
diff --git a/helper/communicator/config_test.go b/helper/communicator/config_test.go
new file mode 100644
index 000000000..f57fb68ca
--- /dev/null
+++ b/helper/communicator/config_test.go
@@ -0,0 +1,28 @@
+package communicator
+
+import (
+	"testing"
+
+	"github.com/mitchellh/packer/template/interpolate"
+)
+
+func testConfig() *Config {
+	return &Config{
+		SSHUsername: "root",
+	}
+}
+
+func TestConfigType(t *testing.T) {
+	c := testConfig()
+	if err := c.Prepare(testContext(t)); len(err) > 0 {
+		t.Fatalf("bad: %#v", err)
+	}
+
+	if c.Type != "ssh" {
+		t.Fatalf("bad: %#v", c)
+	}
+}
+
+func testContext(t *testing.T) *interpolate.Context {
+	return nil
+}
diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go
index 77feebfb9..e6338027e 100644
--- a/helper/communicator/step_connect.go
+++ b/helper/communicator/step_connect.go
@@ -1,6 +1,9 @@
 package communicator
 
 import (
+	"fmt"
+	"log"
+
 	"github.com/mitchellh/multistep"
 	gossh "golang.org/x/crypto/ssh"
 )
@@ -26,14 +29,27 @@ type StepConnect struct {
 }
 
 func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction {
-	// Eventually we might switch between multiple of these depending
-	// on the communicator type.
-	s.substep = &StepConnectSSH{
-		Config:     s.Config,
-		SSHAddress: s.SSHAddress,
-		SSHConfig:  s.SSHConfig,
+	typeMap := map[string]multistep.Step{
+		"none": nil,
+		"ssh": &StepConnectSSH{
+			Config:     s.Config,
+			SSHAddress: s.SSHAddress,
+			SSHConfig:  s.SSHConfig,
+		},
 	}
 
+	step, ok := typeMap[s.Config.Type]
+	if !ok {
+		state.Put("error", fmt.Errorf("unknown communicator type: %s", s.Config.Type))
+		return multistep.ActionHalt
+	}
+
+	if step == nil {
+		log.Printf("[INFO] communicator disabled, will not connect")
+		return multistep.ActionContinue
+	}
+
+	s.substep = step
 	return s.substep.Run(state)
 }
 
diff --git a/helper/communicator/step_connect_test.go b/helper/communicator/step_connect_test.go
new file mode 100644
index 000000000..bf908f8fb
--- /dev/null
+++ b/helper/communicator/step_connect_test.go
@@ -0,0 +1,39 @@
+package communicator
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+)
+
+func TestStepConnect_impl(t *testing.T) {
+	var _ multistep.Step = new(StepConnect)
+}
+
+func TestStepConnect_none(t *testing.T) {
+	state := testState(t)
+
+	step := &StepConnect{
+		Config: &Config{
+			Type: "none",
+		},
+	}
+	defer step.Cleanup(state)
+
+	// run the step
+	if action := step.Run(state); action != multistep.ActionContinue {
+		t.Fatalf("bad action: %#v", action)
+	}
+}
+
+func testState(t *testing.T) multistep.StateBag {
+	state := new(multistep.BasicStateBag)
+	state.Put("hook", &packer.MockHook{})
+	state.Put("ui", &packer.BasicUi{
+		Reader: new(bytes.Buffer),
+		Writer: new(bytes.Buffer),
+	})
+	return state
+}

From 60081c323a2e546717199ca12c83209eb375a6f7 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 13 Jun 2015 17:51:27 -0400
Subject: [PATCH 353/956] helper/communicator: ssh settings aren't required if type is none

---
 helper/communicator/config.go      | 6 ++++--
 helper/communicator/config_test.go | 7 +++++++
 2 files changed, 11 insertions(+), 2
deletions(-) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 28d1d3a43..a2719db13 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -35,8 +35,10 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { // Validation var errs []error - if c.SSHUsername == "" { - errs = append(errs, errors.New("An ssh_username must be specified")) + if c.Type == "ssh" { + if c.SSHUsername == "" { + errs = append(errs, errors.New("An ssh_username must be specified")) + } } return errs diff --git a/helper/communicator/config_test.go b/helper/communicator/config_test.go index f57fb68ca..029c9fe35 100644 --- a/helper/communicator/config_test.go +++ b/helper/communicator/config_test.go @@ -23,6 +23,13 @@ func TestConfigType(t *testing.T) { } } +func TestConfig_none(t *testing.T) { + c := &Config{Type: "none"} + if err := c.Prepare(testContext(t)); len(err) > 0 { + t.Fatalf("bad: %#v", err) + } +} + func testContext(t *testing.T) *interpolate.Context { return nil } From 90581899a4f0013e70a66b2e0999405dd0023835 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 17:53:45 -0400 Subject: [PATCH 354/956] helper/config: decode time durations --- helper/config/decode.go | 1 + helper/config/decode_test.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/helper/config/decode.go b/helper/config/decode.go index 0148ad27c..20554da61 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -66,6 +66,7 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { DecodeHook: mapstructure.ComposeDecodeHookFunc( uint8ToStringHook, mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), ), }) if err != nil { diff --git a/helper/config/decode_test.go b/helper/config/decode_test.go index 43aa615a7..f9fa590c9 100644 --- a/helper/config/decode_test.go +++ b/helper/config/decode_test.go @@ -3,6 +3,7 @@ package config import ( "reflect" "testing" + "time" "github.com/mitchellh/packer/template/interpolate" ) @@ -11,6 +12,7 @@ func TestDecode(t *testing.T) { type Target struct { Name string Address string + Time time.Duration } cases := map[string]struct { @@ -22,10 +24,12 @@ func TestDecode(t *testing.T) { []interface{}{ map[string]interface{}{ "name": "bar", + "time": "5s", }, }, &Target{ Name: "bar", + Time: 5 * time.Second, }, nil, }, From 5d630bf5fb555f4ea496a13a8b811779827c5be6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:05:10 -0400 Subject: [PATCH 355/956] helper/communicator: validate ssh keys --- helper/communicator/config.go | 12 ++++++++++ helper/communicator/ssh.go | 44 +++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 helper/communicator/ssh.go diff --git a/helper/communicator/config.go b/helper/communicator/config.go index a2719db13..a2d93a480 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -2,6 +2,8 @@ package communicator import ( "errors" + "fmt" + "os" "time" "github.com/mitchellh/packer/template/interpolate" @@ -39,6 +41,16 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { if c.SSHUsername == "" { errs = append(errs, errors.New("An ssh_username must be specified")) } + + if c.SSHPrivateKey != "" { + if _, err := os.Stat(c.SSHPrivateKey); err != nil { + errs = append(errs, fmt.Errorf( + "ssh_private_key_file is invalid: %s", err)) + } else if _, err := SSHFileSigner(c.SSHPrivateKey); err != nil { + errs = append(errs, fmt.Errorf( + 
"ssh_private_key_file is invalid: %s", err)) + } + } } return errs diff --git a/helper/communicator/ssh.go b/helper/communicator/ssh.go new file mode 100644 index 000000000..831d620bc --- /dev/null +++ b/helper/communicator/ssh.go @@ -0,0 +1,44 @@ +package communicator + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "golang.org/x/crypto/ssh" +) + +// SSHFileSigner returns an ssh.Signer for a key file. +func SSHFileSigner(path string) (ssh.Signer, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + keyBytes, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + // We parse the private key on our own first so that we can + // show a nicer error if the private key has a password. + block, _ := pem.Decode(keyBytes) + if block == nil { + return nil, fmt.Errorf( + "Failed to read key '%s': no key found", path) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, fmt.Errorf( + "Failed to read key '%s': password protected keys are\n"+ + "not supported. Please decrypt the key prior to use.", path) + } + + signer, err := ssh.ParsePrivateKey(keyBytes) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + return signer, nil +} From d5166a8e6c411a2bca48507a3a0a691540fabf6a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:08:12 -0400 Subject: [PATCH 356/956] builder/virtualbox: use new communicator abstraction --- builder/virtualbox/common/ssh.go | 8 +-- builder/virtualbox/common/ssh_config.go | 52 ++++++-------------- builder/virtualbox/common/ssh_config_test.go | 46 ++++------------- builder/virtualbox/iso/builder.go | 11 +++-- builder/virtualbox/ovf/builder.go | 11 +++-- 5 files changed, 41 insertions(+), 87 deletions(-) diff --git a/builder/virtualbox/common/ssh.go b/builder/virtualbox/common/ssh.go index 9ca2529b8..c20ac2836 100644 --- a/builder/virtualbox/common/ssh.go +++ b/builder/virtualbox/common/ssh.go @@ -17,13 +17,13 @@ func SSHAddress(state multistep.StateBag) (string, error) { func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*gossh.ClientConfig, error) { return func(state multistep.StateBag) (*gossh.ClientConfig, error) { auth := []gossh.AuthMethod{ - gossh.Password(config.SSHPassword), + gossh.Password(config.Comm.SSHPassword), gossh.KeyboardInteractive( - ssh.PasswordKeyboardInteractive(config.SSHPassword)), + ssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), } if config.SSHKeyPath != "" { - signer, err := commonssh.FileSigner(config.SSHKeyPath) + signer, err := commonssh.FileSigner(config.Comm.SSHPrivateKey) if err != nil { return nil, err } @@ -32,7 +32,7 @@ func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*gossh.ClientConf } return &gossh.ClientConfig{ - User: config.SSHUser, + User: config.Comm.SSHUsername, Auth: auth, }, nil } diff --git a/builder/virtualbox/common/ssh_config.go b/builder/virtualbox/common/ssh_config.go index 366b86201..8997c58f7 100644 --- a/builder/virtualbox/common/ssh_config.go +++ b/builder/virtualbox/common/ssh_config.go @@ -2,25 +2,23 @@ package common import ( "errors" - "fmt" - "os" "time" - commonssh "github.com/mitchellh/packer/common/ssh" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { - SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` - SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` - SSHKeyPath string `mapstructure:"ssh_key_path"` - SSHPassword string 
`mapstructure:"ssh_password"` - SSHPort uint `mapstructure:"ssh_port"` - SSHUser string `mapstructure:"ssh_username"` - RawSSHWaitTimeout string `mapstructure:"ssh_wait_timeout"` - SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping"` + Comm communicator.Config `mapstructure:",squash"` - SSHWaitTimeout time.Duration + SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` + SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` + SSHSkipNatMapping bool `mapstructure:"ssh_skip_nat_mapping"` + + // These are deprecated, but we keep them around for BC + // TODO(@mitchellh): remove + SSHKeyPath string `mapstructure:"ssh_key_path"` + SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout"` } func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { @@ -32,37 +30,19 @@ func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { c.SSHHostPortMax = 4444 } - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if c.RawSSHWaitTimeout == "" { - c.RawSSHWaitTimeout = "20m" - } - - var errs []error + // TODO: backwards compatibility, write fixer instead if c.SSHKeyPath != "" { - if _, err := os.Stat(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } else if _, err := commonssh.FileSigner(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } + c.Comm.SSHPrivateKey = c.SSHKeyPath + } + if c.SSHWaitTimeout != 0 { + c.Comm.SSHTimeout = c.SSHWaitTimeout } + errs := c.Comm.Prepare(ctx) if c.SSHHostPortMin > c.SSHHostPortMax { errs = append(errs, errors.New("ssh_host_port_min must be less than ssh_host_port_max")) } - if c.SSHUser == "" { - errs = append(errs, errors.New("An ssh_username must be specified.")) - } - - var err error - c.SSHWaitTimeout, err = time.ParseDuration(c.RawSSHWaitTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) - } - return errs } diff --git a/builder/virtualbox/common/ssh_config_test.go b/builder/virtualbox/common/ssh_config_test.go index 489b7eae8..e5b918fe9 100644 --- a/builder/virtualbox/common/ssh_config_test.go +++ b/builder/virtualbox/common/ssh_config_test.go @@ -4,11 +4,15 @@ import ( "io/ioutil" "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func testSSHConfig() *SSHConfig { return &SSHConfig{ - SSHUser: "foo", + Comm: communicator.Config{ + SSHUsername: "foo", + }, } } @@ -27,8 +31,8 @@ func TestSSHConfigPrepare(t *testing.T) { t.Errorf("bad max ssh host port: %d", c.SSHHostPortMax) } - if c.SSHPort != 22 { - t.Errorf("bad ssh port: %d", c.SSHPort) + if c.Comm.SSHPort != 22 { + t.Errorf("bad ssh port: %d", c.Comm.SSHPort) } } @@ -109,46 +113,14 @@ func TestSSHConfigPrepare_SSHUser(t *testing.T) { var errs []error c = testSSHConfig() - c.SSHUser = "" + c.Comm.SSHUsername = "" errs = c.Prepare(testConfigTemplate(t)) if len(errs) == 0 { t.Fatalf("should have error") } c = testSSHConfig() - c.SSHUser = "exists" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } -} - -func TestSSHConfigPrepare_SSHWaitTimeout(t *testing.T) { - var c *SSHConfig - var errs []error - - // Defaults - c = testSSHConfig() - c.RawSSHWaitTimeout = "" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } - if c.RawSSHWaitTimeout != "20m" { - t.Fatalf("bad value: %s", c.RawSSHWaitTimeout) - } - - // Test with a bad value - c = testSSHConfig() - c.RawSSHWaitTimeout = "this is not good" - errs = 
c.Prepare(testConfigTemplate(t)) - if len(errs) == 0 { - t.Fatal("should have error") - } - - // Test with a good one - c = testSSHConfig() - c.RawSSHWaitTimeout = "5s" + c.Comm.SSHUsername = "exists" errs = c.Prepare(testConfigTemplate(t)) if len(errs) > 0 { t.Fatalf("should not have error: %#v", errs) diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 908acf2c1..226de0527 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -11,6 +11,7 @@ import ( "github.com/mitchellh/multistep" vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -253,7 +254,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(vboxcommon.StepAttachFloppy), &vboxcommon.StepForwardSSH{ - GuestPort: b.config.SSHPort, + GuestPort: uint(b.config.SSHConfig.Comm.SSHPort), HostPortMin: b.config.SSHHostPortMin, HostPortMax: b.config.SSHHostPortMax, SkipNatMapping: b.config.SSHSkipNatMapping, @@ -271,10 +272,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: vboxcommon.SSHAddress, - SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: vboxcommon.SSHAddress, + SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), }, &vboxcommon.StepUploadVersion{ Path: b.config.VBoxVersionFile, diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index 82ba4d636..05ec6159f 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -10,6 +10,7 @@ import ( "github.com/mitchellh/multistep" vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" ) @@ -82,7 +83,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(vboxcommon.StepAttachFloppy), &vboxcommon.StepForwardSSH{ - GuestPort: b.config.SSHPort, + GuestPort: uint(b.config.SSHConfig.Comm.SSHPort), HostPortMin: b.config.SSHHostPortMin, HostPortMax: b.config.SSHHostPortMax, SkipNatMapping: b.config.SSHSkipNatMapping, @@ -100,10 +101,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: vboxcommon.SSHAddress, - SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: vboxcommon.SSHAddress, + SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), }, &vboxcommon.StepUploadVersion{ Path: b.config.VBoxVersionFile, From a1ceb5a7ef8a39d78c88783c58f330e52d2cb852 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:10:37 -0400 Subject: [PATCH 357/956] common: remove StepConnectSSH --- common/step_connect_ssh.go | 171 -------------------------------- common/step_connect_ssh_test.go | 14 --- 2 files changed, 185 deletions(-) delete mode 100644 common/step_connect_ssh.go delete mode 100644 common/step_connect_ssh_test.go 
diff --git a/common/step_connect_ssh.go b/common/step_connect_ssh.go deleted file mode 100644 index 0c1c624bb..000000000 --- a/common/step_connect_ssh.go +++ /dev/null @@ -1,171 +0,0 @@ -package common - -import ( - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/communicator/ssh" - "github.com/mitchellh/packer/packer" - gossh "golang.org/x/crypto/ssh" -) - -// StepConnectSSH is a multistep Step implementation that waits for SSH -// to become available. It gets the connection information from a single -// configuration when creating the step. -// -// Uses: -// ui packer.Ui -// -// Produces: -// communicator packer.Communicator -type StepConnectSSH struct { - // SSHAddress is a function that returns the TCP address to connect to - // for SSH. This is a function so that you can query information - // if necessary for this address. - SSHAddress func(multistep.StateBag) (string, error) - - // SSHConfig is a function that returns the proper client configuration - // for SSH access. - SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) - - // SSHWaitTimeout is the total timeout to wait for SSH to become available. - SSHWaitTimeout time.Duration - - // Pty, if true, will request a Pty from the remote end. - Pty bool - - comm packer.Communicator -} - -func (s *StepConnectSSH) Run(state multistep.StateBag) multistep.StepAction { - ui := state.Get("ui").(packer.Ui) - - var comm packer.Communicator - var err error - - cancel := make(chan struct{}) - waitDone := make(chan bool, 1) - go func() { - ui.Say("Waiting for SSH to become available...") - comm, err = s.waitForSSH(state, cancel) - waitDone <- true - }() - - log.Printf("Waiting for SSH, up to timeout: %s", s.SSHWaitTimeout) - timeout := time.After(s.SSHWaitTimeout) -WaitLoop: - for { - // Wait for either SSH to become available, a timeout to occur, - // or an interrupt to come through. - select { - case <-waitDone: - if err != nil { - ui.Error(fmt.Sprintf("Error waiting for SSH: %s", err)) - state.Put("error", err) - return multistep.ActionHalt - } - - ui.Say("Connected to SSH!") - s.comm = comm - state.Put("communicator", comm) - break WaitLoop - case <-timeout: - err := fmt.Errorf("Timeout waiting for SSH.") - state.Put("error", err) - ui.Error(err.Error()) - close(cancel) - return multistep.ActionHalt - case <-time.After(1 * time.Second): - if _, ok := state.GetOk(multistep.StateCancelled); ok { - // The step sequence was cancelled, so cancel waiting for SSH - // and just start the halting process. - close(cancel) - log.Println("Interrupt detected, quitting waiting for SSH.") - return multistep.ActionHalt - } - } - } - - return multistep.ActionContinue -} - -func (s *StepConnectSSH) Cleanup(multistep.StateBag) { -} - -func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan struct{}) (packer.Communicator, error) { - handshakeAttempts := 0 - - var comm packer.Communicator - first := true - for { - // Don't check for cancel or wait on first iteration - if !first { - select { - case <-cancel: - log.Println("SSH wait cancelled. 
Exiting loop.") - return nil, errors.New("SSH wait cancelled") - case <-time.After(5 * time.Second): - } - } - first = false - - // First we request the TCP connection information - address, err := s.SSHAddress(state) - if err != nil { - log.Printf("Error getting SSH address: %s", err) - continue - } - - // Retrieve the SSH configuration - sshConfig, err := s.SSHConfig(state) - if err != nil { - log.Printf("Error getting SSH config: %s", err) - continue - } - - // Attempt to connect to SSH port - connFunc := ssh.ConnectFunc("tcp", address) - nc, err := connFunc() - if err != nil { - log.Printf("TCP connection to SSH ip/port failed: %s", err) - continue - } - nc.Close() - - // Then we attempt to connect via SSH - config := &ssh.Config{ - Connection: connFunc, - SSHConfig: sshConfig, - Pty: s.Pty, - } - - log.Println("Attempting SSH connection...") - comm, err = ssh.New(address, config) - if err != nil { - log.Printf("SSH handshake err: %s", err) - - // Only count this as an attempt if we were able to attempt - // to authenticate. Note this is very brittle since it depends - // on the string of the error... but I don't see any other way. - if strings.Contains(err.Error(), "authenticate") { - log.Printf("Detected authentication error. Increasing handshake attempts.") - handshakeAttempts += 1 - } - - if handshakeAttempts < 10 { - // Try to connect via SSH a handful of times - continue - } - - return nil, err - } - - break - } - - return comm, nil -} diff --git a/common/step_connect_ssh_test.go b/common/step_connect_ssh_test.go deleted file mode 100644 index 49b0e52b4..000000000 --- a/common/step_connect_ssh_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package common - -import ( - "github.com/mitchellh/multistep" - "testing" -) - -func TestStepConnectSSH_Impl(t *testing.T) { - var raw interface{} - raw = new(StepConnectSSH) - if _, ok := raw.(multistep.Step); !ok { - t.Fatalf("connect ssh should be a step") - } -} From e55792811967d9c90367a8f92d9b60b79497055c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:16:12 -0400 Subject: [PATCH 358/956] builder/amazon: use helper/communicator --- builder/amazon/common/run_config.go | 37 ++++-------------------- builder/amazon/common/run_config_test.go | 34 +++++++++------------- builder/amazon/ebs/builder.go | 16 ++++++---- builder/amazon/instance/builder.go | 16 ++++++---- 4 files changed, 38 insertions(+), 65 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 0afdeb07c..6dec07b39 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -4,9 +4,9 @@ import ( "errors" "fmt" "os" - "time" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/template/interpolate" ) @@ -21,11 +21,6 @@ type RunConfig struct { SourceAmi string `mapstructure:"source_ami"` SpotPrice string `mapstructure:"spot_price"` SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"` - SSHPrivateIp bool `mapstructure:"ssh_private_ip"` - SSHPort int `mapstructure:"ssh_port"` SecurityGroupId string `mapstructure:"security_group_id"` SecurityGroupIds []string `mapstructure:"security_group_ids"` SubnetId string `mapstructure:"subnet_id"` @@ -34,27 +29,19 @@ type RunConfig struct { UserDataFile string `mapstructure:"user_data_file"` 
VpcId string `mapstructure:"vpc_id"` - // Unexported fields that are calculated from others - sshTimeout time.Duration + // Communicator settings + Comm communicator.Config `mapstructure:",squash"` + SSHPrivateIp bool `mapstructure:"ssh_private_ip"` } func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { - // Defaults - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if c.RawSSHTimeout == "" { - c.RawSSHTimeout = "5m" - } - if c.TemporaryKeyPairName == "" { c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } // Validation - var errs []error + errs := c.Comm.Prepare(ctx) if c.SourceAmi == "" { errs = append(errs, errors.New("A source_ami must be specified")) } @@ -70,10 +57,6 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { } } - if c.SSHUsername == "" { - errs = append(errs, errors.New("An ssh_username must be specified")) - } - if c.UserData != "" && c.UserDataFile != "" { errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified.")) } else if c.UserDataFile != "" { @@ -91,15 +74,5 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { } } - var err error - c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - return errs } - -func (c *RunConfig) SSHTimeout() time.Duration { - return c.sshTimeout -} diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go index 8e9c4b6b9..0b029169b 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -4,6 +4,8 @@ import ( "io/ioutil" "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func init() { @@ -19,7 +21,10 @@ func testConfig() *RunConfig { return &RunConfig{ SourceAmi: "abcd", InstanceType: "m1.small", - SSHUsername: "root", + + Comm: communicator.Config{ + SSHUsername: "foo", + }, } } @@ -62,41 +67,28 @@ func TestRunConfigPrepare_SpotAuto(t *testing.T) { func TestRunConfigPrepare_SSHPort(t *testing.T) { c := testConfig() - c.SSHPort = 0 + c.Comm.SSHPort = 0 if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHPort != 22 { - t.Fatalf("invalid value: %d", c.SSHPort) + if c.Comm.SSHPort != 22 { + t.Fatalf("invalid value: %d", c.Comm.SSHPort) } - c.SSHPort = 44 + c.Comm.SSHPort = 44 if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHPort != 44 { - t.Fatalf("invalid value: %d", c.SSHPort) - } -} - -func TestRunConfigPrepare_SSHTimeout(t *testing.T) { - c := testConfig() - c.RawSSHTimeout = "" - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } - - c.RawSSHTimeout = "bad" - if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) + if c.Comm.SSHPort != 44 { + t.Fatalf("invalid value: %d", c.Comm.SSHPort) } } func TestRunConfigPrepare_SSHUsername(t *testing.T) { c := testConfig() - c.SSHUsername = "" + c.Comm.SSHUsername = "" if err := c.Prepare(nil); len(err) != 1 { t.Fatalf("err: %s", err) } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index c689accee..a9adcf208 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" 
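	// helper/communicator (imported above) replaces the hand-rolled SSH
	// plumbing in each builder. A minimal sketch of the shared pattern,
	// with comm, ctx, and errs as illustrative stand-ins rather than
	// names taken from this patch:
	//
	//	var comm communicator.Config // squashed into the builder config
	//	if es := comm.Prepare(ctx); len(es) > 0 {
	//		errs = packer.MultiErrorAppend(errs, es...)
	//	}
	//	// ...and later, in the step sequence:
	//	&communicator.StepConnect{Config: &comm /* plus SSH helpers */}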
"github.com/mitchellh/packer/template/interpolate" @@ -89,11 +90,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), KeyPairName: b.config.TemporaryKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, - SSHPort: b.config.SSHPort, + SSHPort: b.config.RunConfig.Comm.SSHPort, VpcId: b.config.VpcId, }, &awscommon.StepRunSourceInstance{ @@ -112,11 +113,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BlockDevices: b.config.BlockDevices, Tags: b.config.RunTags, }, - &common.StepConnectSSH{ + &communicator.StepConnect{ + Config: &b.config.RunConfig.Comm, SSHAddress: awscommon.SSHAddress( - ec2conn, b.config.SSHPort, b.config.SSHPrivateIp), - SSHConfig: awscommon.SSHConfig(b.config.SSHUsername), - SSHWaitTimeout: b.config.SSHTimeout(), + ec2conn, + b.config.RunConfig.Comm.SSHPort, + b.config.SSHPrivateIp), + SSHConfig: awscommon.SSHConfig( + b.config.RunConfig.Comm.SSHUsername), }, &common.StepProvision{}, &stepStopInstance{SpotPrice: b.config.SpotPrice}, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 91355b913..09bda686a 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -175,11 +176,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), KeyPairName: b.config.TemporaryKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, - SSHPort: b.config.SSHPort, + SSHPort: b.config.RunConfig.Comm.SSHPort, VpcId: b.config.VpcId, }, &awscommon.StepRunSourceInstance{ @@ -197,11 +198,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BlockDevices: b.config.BlockDevices, Tags: b.config.RunTags, }, - &common.StepConnectSSH{ + &communicator.StepConnect{ + Config: &b.config.RunConfig.Comm, SSHAddress: awscommon.SSHAddress( - ec2conn, b.config.SSHPort, b.config.SSHPrivateIp), - SSHConfig: awscommon.SSHConfig(b.config.SSHUsername), - SSHWaitTimeout: b.config.SSHTimeout(), + ec2conn, + b.config.RunConfig.Comm.SSHPort, + b.config.SSHPrivateIp), + SSHConfig: awscommon.SSHConfig( + b.config.RunConfig.Comm.SSHUsername), }, &common.StepProvision{}, &StepUploadX509Cert{}, From 669f3018813886cdc64cfdbd9a2431d58c7555f6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:26:13 -0400 Subject: [PATCH 359/956] builder/digitalocean: use helper/comm --- builder/digitalocean/builder.go | 10 ++-- builder/digitalocean/builder_test.go | 54 +++------------------- builder/digitalocean/config.go | 56 ++++++----------------- builder/digitalocean/ssh.go | 4 +- builder/digitalocean/step_droplet_info.go | 2 +- builder/digitalocean/step_power_off.go | 2 +- builder/digitalocean/step_snapshot.go | 2 +- 
7 files changed, 31 insertions(+), 99 deletions(-) diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index 97569d0fe..d5f1b7a83 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -6,11 +6,11 @@ package digitalocean import ( "fmt" "log" - "time" "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" "golang.org/x/oauth2" ) @@ -53,10 +53,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(stepCreateDroplet), new(stepDropletInfo), - &common.StepConnectSSH{ - SSHAddress: sshAddress, - SSHConfig: sshConfig, - SSHWaitTimeout: 5 * time.Minute, + &communicator.StepConnect{ + Config: &b.config.Comm, + SSHAddress: sshAddress, + SSHConfig: sshConfig, }, new(common.StepProvision), new(stepShutdown), diff --git a/builder/digitalocean/builder_test.go b/builder/digitalocean/builder_test.go index 22c9e3b50..5e3014937 100644 --- a/builder/digitalocean/builder_test.go +++ b/builder/digitalocean/builder_test.go @@ -3,6 +3,7 @@ package digitalocean import ( "strconv" "testing" + "time" "github.com/mitchellh/packer/packer" ) @@ -163,8 +164,8 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) { t.Fatalf("should not have error: %s", err) } - if b.config.SSHUsername != "root" { - t.Errorf("invalid: %s", b.config.SSHUsername) + if b.config.Comm.SSHUsername != "root" { + t.Errorf("invalid: %s", b.config.Comm.SSHUsername) } // Test set @@ -178,52 +179,11 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) { t.Fatalf("should not have error: %s", err) } - if b.config.SSHUsername != "foo" { - t.Errorf("invalid: %s", b.config.SSHUsername) + if b.config.Comm.SSHUsername != "foo" { + t.Errorf("invalid: %s", b.config.Comm.SSHUsername) } } -func TestBuilderPrepare_SSHTimeout(t *testing.T) { - var b Builder - config := testConfig() - - // Test default - warnings, err := b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.RawSSHTimeout != "1m" { - t.Errorf("invalid: %s", b.config.RawSSHTimeout) - } - - // Test set - config["ssh_timeout"] = "30s" - b = Builder{} - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - // Test bad - config["ssh_timeout"] = "tubes" - b = Builder{} - warnings, err = b.Prepare(config) - if len(warnings) > 0 { - t.Fatalf("bad: %#v", warnings) - } - if err == nil { - t.Fatal("should have error") - } - -} - func TestBuilderPrepare_StateTimeout(t *testing.T) { var b Builder config := testConfig() @@ -237,8 +197,8 @@ func TestBuilderPrepare_StateTimeout(t *testing.T) { t.Fatalf("should not have error: %s", err) } - if b.config.RawStateTimeout != "6m" { - t.Errorf("invalid: %s", b.config.RawStateTimeout) + if b.config.StateTimeout != 6*time.Minute { + t.Errorf("invalid: %s", b.config.StateTimeout) } // Test set diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 178b54049..3bddf2f7e 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -9,6 +9,7 @@ import ( "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" 
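	// With helper/config doing the decoding, state_timeout below is
	// declared directly as a time.Duration; duration strings such as "6m"
	// are converted while decoding (presumably via the decoder's
	// string-to-duration hook), so the old RawStateTimeout/ParseDuration
	// bookkeeping disappears.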
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -16,6 +17,7 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` APIToken string `mapstructure:"api_token"` @@ -23,20 +25,11 @@ type Config struct { Size string `mapstructure:"size"` Image string `mapstructure:"image"` - PrivateNetworking bool `mapstructure:"private_networking"` - SnapshotName string `mapstructure:"snapshot_name"` - DropletName string `mapstructure:"droplet_name"` - UserData string `mapstructure:"user_data"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort uint `mapstructure:"ssh_port"` - - RawSSHTimeout string `mapstructure:"ssh_timeout"` - RawStateTimeout string `mapstructure:"state_timeout"` - - // These are unexported since they're set by other fields - // being set. - sshTimeout time.Duration - stateTimeout time.Duration + PrivateNetworking bool `mapstructure:"private_networking"` + SnapshotName string `mapstructure:"snapshot_name"` + StateTimeout time.Duration `mapstructure:"state_timeout"` + DropletName string `mapstructure:"droplet_name"` + UserData string `mapstructure:"user_data"` ctx *interpolate.Context } @@ -79,29 +72,22 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } - if c.SSHUsername == "" { + if c.Comm.SSHUsername == "" { // Default to "root". You can override this if your // SourceImage has a different user account then the DO default - c.SSHUsername = "root" + c.Comm.SSHUsername = "root" } - if c.SSHPort == 0 { - // Default to port 22 per DO default - c.SSHPort = 22 - } - - if c.RawSSHTimeout == "" { - // Default to 1 minute timeouts - c.RawSSHTimeout = "1m" - } - - if c.RawStateTimeout == "" { + if c.StateTimeout == 0 { // Default to 6 minute timeouts waiting for // desired state. i.e waiting for droplet to become active - c.RawStateTimeout = "6m" + c.StateTimeout = 6 * time.Minute } var errs *packer.MultiError + if es := c.Comm.Prepare(c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) 
+ } if c.APIToken == "" { // Required configurations that will display errors if not set errs = packer.MultiErrorAppend( @@ -123,20 +109,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs, errors.New("image is required")) } - sshTimeout, err := time.ParseDuration(c.RawSSHTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - c.sshTimeout = sshTimeout - - stateTimeout, err := time.ParseDuration(c.RawStateTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) - } - c.stateTimeout = stateTimeout - if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } diff --git a/builder/digitalocean/ssh.go b/builder/digitalocean/ssh.go index 12046b04b..4d8d1f08c 100644 --- a/builder/digitalocean/ssh.go +++ b/builder/digitalocean/ssh.go @@ -9,7 +9,7 @@ import ( func sshAddress(state multistep.StateBag) (string, error) { config := state.Get("config").(Config) ipAddress := state.Get("droplet_ip").(string) - return fmt.Sprintf("%s:%d", ipAddress, config.SSHPort), nil + return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil } func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { @@ -22,7 +22,7 @@ func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { } return &ssh.ClientConfig{ - User: config.SSHUsername, + User: config.Comm.SSHUsername, Auth: []ssh.AuthMethod{ ssh.PublicKeys(signer), }, diff --git a/builder/digitalocean/step_droplet_info.go b/builder/digitalocean/step_droplet_info.go index 5fbcb7141..81d84dc8d 100644 --- a/builder/digitalocean/step_droplet_info.go +++ b/builder/digitalocean/step_droplet_info.go @@ -18,7 +18,7 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Waiting for droplet to become active...") - err := waitForDropletState("active", dropletId, client, c.stateTimeout) + err := waitForDropletState("active", dropletId, client, c.StateTimeout) if err != nil { err := fmt.Errorf("Error waiting for droplet to become active: %s", err) state.Put("error", err) diff --git a/builder/digitalocean/step_power_off.go b/builder/digitalocean/step_power_off.go index 94891e227..8341083aa 100644 --- a/builder/digitalocean/step_power_off.go +++ b/builder/digitalocean/step_power_off.go @@ -42,7 +42,7 @@ func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { } log.Println("Waiting for poweroff event to complete...") - err = waitForDropletState("off", dropletId, client, c.stateTimeout) + err = waitForDropletState("off", dropletId, client, c.StateTimeout) if err != nil { state.Put("error", err) ui.Error(err.Error()) diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go index f6902b8c5..fdc303105 100644 --- a/builder/digitalocean/step_snapshot.go +++ b/builder/digitalocean/step_snapshot.go @@ -41,7 +41,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { // With the pending state over, verify that we're in the active state ui.Say("Waiting for snapshot to complete...") - err = waitForDropletState("active", dropletId, client, c.stateTimeout) + err = waitForDropletState("active", dropletId, client, c.StateTimeout) if err != nil { err := fmt.Errorf("Error waiting for snapshot to complete: %s", err) state.Put("error", err) From 502076c92e6f9abb15e77d6170cfa86105b0b495 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:30:16 -0400 Subject: [PATCH 360/956] 
builder/googlecompute: use helper/comm --- builder/googlecompute/builder.go | 9 ++++--- builder/googlecompute/config.go | 26 +++---------------- builder/googlecompute/ssh.go | 4 +-- builder/googlecompute/step_create_instance.go | 2 +- 4 files changed, 12 insertions(+), 29 deletions(-) diff --git a/builder/googlecompute/builder.go b/builder/googlecompute/builder.go index 8e35598d2..3ed576b50 100644 --- a/builder/googlecompute/builder.go +++ b/builder/googlecompute/builder.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" ) @@ -60,10 +61,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepInstanceInfo{ Debug: b.config.PackerDebug, }, - &common.StepConnectSSH{ - SSHAddress: sshAddress, - SSHConfig: sshConfig, - SSHWaitTimeout: b.config.sshTimeout, + &communicator.StepConnect{ + Config: &b.config.Comm, + SSHAddress: sshAddress, + SSHConfig: sshConfig, }, new(common.StepProvision), new(StepTeardownInstance), diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 69d223a75..7f59aa183 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -7,6 +7,7 @@ import ( "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -17,6 +18,7 @@ import ( // state of the config object. type Config struct { common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` AccountFile string `mapstructure:"account_file"` ProjectId string `mapstructure:"project_id"` @@ -31,16 +33,12 @@ type Config struct { Network string `mapstructure:"network"` SourceImage string `mapstructure:"source_image"` SourceImageProjectId string `mapstructure:"source_image_project_id"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort uint `mapstructure:"ssh_port"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` RawStateTimeout string `mapstructure:"state_timeout"` Tags []string `mapstructure:"tags"` Zone string `mapstructure:"zone"` account accountFile privateKeyBytes []byte - sshTimeout time.Duration stateTimeout time.Duration ctx *interpolate.Context } @@ -88,20 +86,12 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.MachineType = "n1-standard-1" } - if c.RawSSHTimeout == "" { - c.RawSSHTimeout = "5m" - } - if c.RawStateTimeout == "" { c.RawStateTimeout = "5m" } - if c.SSHUsername == "" { - c.SSHUsername = "root" - } - - if c.SSHPort == 0 { - c.SSHPort = 22 + if c.Comm.SSHUsername == "" { + c.Comm.SSHUsername = "root" } var errs *packer.MultiError @@ -122,14 +112,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs, errors.New("a zone must be specified")) } - // Process timeout settings. 
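	// (The ssh_timeout parsing removed here moves into
	// communicator.Config.Prepare, which applies its own default when the
	// template leaves the value unset.)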
- sshTimeout, err := time.ParseDuration(c.RawSSHTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - c.sshTimeout = sshTimeout - stateTimeout, err := time.ParseDuration(c.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( diff --git a/builder/googlecompute/ssh.go b/builder/googlecompute/ssh.go index e04029e44..446648884 100644 --- a/builder/googlecompute/ssh.go +++ b/builder/googlecompute/ssh.go @@ -10,7 +10,7 @@ import ( func sshAddress(state multistep.StateBag) (string, error) { config := state.Get("config").(*Config) ipAddress := state.Get("instance_ip").(string) - return fmt.Sprintf("%s:%d", ipAddress, config.SSHPort), nil + return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil } // sshConfig returns the ssh configuration. @@ -24,7 +24,7 @@ func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { } return &ssh.ClientConfig{ - User: config.SSHUsername, + User: config.Comm.SSHUsername, Auth: []ssh.AuthMethod{ ssh.PublicKeys(signer), }, diff --git a/builder/googlecompute/step_create_instance.go b/builder/googlecompute/step_create_instance.go index 6bfee5460..939925c58 100644 --- a/builder/googlecompute/step_create_instance.go +++ b/builder/googlecompute/step_create_instance.go @@ -32,7 +32,7 @@ func (config *Config) getInstanceMetadata(sshPublicKey string) map[string]string // Merge any existing ssh keys with our public key sshMetaKey := "sshKeys" - sshKeys := fmt.Sprintf("%s:%s", config.SSHUsername, sshPublicKey) + sshKeys := fmt.Sprintf("%s:%s", config.Comm.SSHUsername, sshPublicKey) if confSshKeys, exists := instanceMetadata[sshMetaKey]; exists { sshKeys = fmt.Sprintf("%s\n%s", sshKeys, confSshKeys) } From f55e2d2c4b7d005191975c0e7b0256cd504d705e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:34:37 -0400 Subject: [PATCH 361/956] builder/openstack: convert to helper/comm --- builder/openstack/builder.go | 23 ++++++++++------ builder/openstack/run_config.go | 41 +++++----------------------- builder/openstack/run_config_test.go | 34 +++++++++-------------- 3 files changed, 34 insertions(+), 64 deletions(-) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index ab60afc0e..6d178c6ef 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -5,10 +5,11 @@ package openstack import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common" "log" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -19,9 +20,10 @@ const BuilderId = "mitchellh.openstack" type Config struct { common.PackerConfig `mapstructure:",squash"` - AccessConfig `mapstructure:",squash"` - ImageConfig `mapstructure:",squash"` - RunConfig `mapstructure:",squash"` + + AccessConfig `mapstructure:",squash"` + ImageConfig `mapstructure:",squash"` + RunConfig `mapstructure:",squash"` ctx interpolate.Context } @@ -88,10 +90,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe FloatingIpPool: b.config.FloatingIpPool, FloatingIp: b.config.FloatingIp, }, - &common.StepConnectSSH{ - SSHAddress: SSHAddress(computeClient, b.config.SSHInterface, b.config.SSHPort), - SSHConfig: SSHConfig(b.config.SSHUsername), - SSHWaitTimeout: b.config.SSHTimeout(), + &communicator.StepConnect{ + Config: 
&b.config.RunConfig.Comm, + SSHAddress: SSHAddress( + computeClient, + b.config.SSHInterface, + b.config.RunConfig.Comm.SSHPort), + SSHConfig: SSHConfig(b.config.RunConfig.Comm.SSHUsername), }, &common.StepProvision{}, &stepCreateImage{}, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 4a6a1b81f..a758b0dd1 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -2,21 +2,19 @@ package openstack import ( "errors" - "fmt" - "time" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/template/interpolate" ) // RunConfig contains configuration for running an instance from a source // image and details on how to access that launched image. type RunConfig struct { + Comm communicator.Config `mapstructure:",squash"` + SSHInterface string `mapstructure:"ssh_interface"` + SourceImage string `mapstructure:"source_image"` Flavor string `mapstructure:"flavor"` - RawSSHTimeout string `mapstructure:"ssh_timeout"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPort int `mapstructure:"ssh_port"` - SSHInterface string `mapstructure:"ssh_interface"` AvailabilityZone string `mapstructure:"availability_zone"` RackconnectWait bool `mapstructure:"rackconnect_wait"` FloatingIpPool string `mapstructure:"floating_ip_pool"` @@ -27,23 +25,12 @@ type RunConfig struct { // Not really used, but here for BC OpenstackProvider string `mapstructure:"openstack_provider"` UseFloatingIp bool `mapstructure:"use_floating_ip"` - - // Unexported fields that are calculated from others - sshTimeout time.Duration } func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { // Defaults - if c.SSHUsername == "" { - c.SSHUsername = "root" - } - - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if c.RawSSHTimeout == "" { - c.RawSSHTimeout = "5m" + if c.Comm.SSHUsername == "" { + c.Comm.SSHUsername = "root" } if c.UseFloatingIp && c.FloatingIpPool == "" { @@ -51,8 +38,7 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { } // Validation - var err error - errs := make([]error, 0) + errs := c.Comm.Prepare(ctx) if c.SourceImage == "" { errs = append(errs, errors.New("A source_image must be specified")) } @@ -61,18 +47,5 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { errs = append(errs, errors.New("A flavor must be specified")) } - if c.SSHUsername == "" { - errs = append(errs, errors.New("An ssh_username must be specified")) - } - - c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) - } - return errs } - -func (c *RunConfig) SSHTimeout() time.Duration { - return c.sshTimeout -} diff --git a/builder/openstack/run_config_test.go b/builder/openstack/run_config_test.go index 16b89b352..113934a29 100644 --- a/builder/openstack/run_config_test.go +++ b/builder/openstack/run_config_test.go @@ -3,6 +3,8 @@ package openstack import ( "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func init() { @@ -17,7 +19,10 @@ func testRunConfig() *RunConfig { return &RunConfig{ SourceImage: "abcd", Flavor: "m1.small", - SSHUsername: "root", + + Comm: communicator.Config{ + SSHUsername: "foo", + }, } } @@ -47,41 +52,28 @@ func TestRunConfigPrepare_SourceImage(t *testing.T) { func TestRunConfigPrepare_SSHPort(t *testing.T) { c := testRunConfig() - c.SSHPort = 0 + c.Comm.SSHPort = 0 if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHPort != 22 { - t.Fatalf("invalid value: %d", 
c.SSHPort) + if c.Comm.SSHPort != 22 { + t.Fatalf("invalid value: %d", c.Comm.SSHPort) } - c.SSHPort = 44 + c.Comm.SSHPort = 44 if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHPort != 44 { - t.Fatalf("invalid value: %d", c.SSHPort) - } -} - -func TestRunConfigPrepare_SSHTimeout(t *testing.T) { - c := testRunConfig() - c.RawSSHTimeout = "" - if err := c.Prepare(nil); len(err) != 0 { - t.Fatalf("err: %s", err) - } - - c.RawSSHTimeout = "bad" - if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) + if c.Comm.SSHPort != 44 { + t.Fatalf("invalid value: %d", c.Comm.SSHPort) } } func TestRunConfigPrepare_SSHUsername(t *testing.T) { c := testRunConfig() - c.SSHUsername = "" + c.Comm.SSHUsername = "" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } From 820bad69390687185970d9b32f6005320da62b09 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:43:27 -0400 Subject: [PATCH 362/956] builder/parallels: convert to new comm type --- builder/parallels/common/ssh.go | 8 ++-- builder/parallels/common/ssh_config.go | 47 ++++++--------------- builder/parallels/common/ssh_config_test.go | 46 ++++---------------- builder/parallels/iso/builder.go | 9 ++-- builder/parallels/pvm/builder.go | 12 +++--- 5 files changed, 37 insertions(+), 85 deletions(-) diff --git a/builder/parallels/common/ssh.go b/builder/parallels/common/ssh.go index becf68e42..827124677 100644 --- a/builder/parallels/common/ssh.go +++ b/builder/parallels/common/ssh.go @@ -29,13 +29,13 @@ func SSHAddress(state multistep.StateBag) (string, error) { func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*ssh.ClientConfig, error) { return func(state multistep.StateBag) (*ssh.ClientConfig, error) { auth := []ssh.AuthMethod{ - ssh.Password(config.SSHPassword), + ssh.Password(config.Comm.SSHPassword), ssh.KeyboardInteractive( - packerssh.PasswordKeyboardInteractive(config.SSHPassword)), + packerssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), } if config.SSHKeyPath != "" { - signer, err := commonssh.FileSigner(config.SSHKeyPath) + signer, err := commonssh.FileSigner(config.Comm.SSHPrivateKey) if err != nil { return nil, err } @@ -44,7 +44,7 @@ func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*ssh.ClientConfig } return &ssh.ClientConfig{ - User: config.SSHUser, + User: config.Comm.SSHUsername, Auth: auth, }, nil } diff --git a/builder/parallels/common/ssh_config.go b/builder/parallels/common/ssh_config.go index 9f1a9506f..bea164b06 100644 --- a/builder/parallels/common/ssh_config.go +++ b/builder/parallels/common/ssh_config.go @@ -1,52 +1,29 @@ package common import ( - "errors" - "fmt" - "os" "time" - commonssh "github.com/mitchellh/packer/common/ssh" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { - SSHKeyPath string `mapstructure:"ssh_key_path"` - SSHPassword string `mapstructure:"ssh_password"` - SSHPort uint `mapstructure:"ssh_port"` - SSHUser string `mapstructure:"ssh_username"` - RawSSHWaitTimeout string `mapstructure:"ssh_wait_timeout"` + Comm communicator.Config `mapstructure:",squash"` - SSHWaitTimeout time.Duration + // These are deprecated, but we keep them around for BC + // TODO(@mitchellh): remove + SSHKeyPath string `mapstructure:"ssh_key_path"` + SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout"` } func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if 
c.RawSSHWaitTimeout == "" { - c.RawSSHWaitTimeout = "20m" - } - - var errs []error + // TODO: backwards compatibility, write fixer instead if c.SSHKeyPath != "" { - if _, err := os.Stat(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } else if _, err := commonssh.FileSigner(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } + c.Comm.SSHPrivateKey = c.SSHKeyPath + } + if c.SSHWaitTimeout != 0 { + c.Comm.SSHTimeout = c.SSHWaitTimeout } - if c.SSHUser == "" { - errs = append(errs, errors.New("An ssh_username must be specified.")) - } - - var err error - c.SSHWaitTimeout, err = time.ParseDuration(c.RawSSHWaitTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) - } - - return errs + return c.Comm.Prepare(ctx) } diff --git a/builder/parallels/common/ssh_config_test.go b/builder/parallels/common/ssh_config_test.go index a6c9e8ef5..01dd1ff62 100644 --- a/builder/parallels/common/ssh_config_test.go +++ b/builder/parallels/common/ssh_config_test.go @@ -4,11 +4,15 @@ import ( "io/ioutil" "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func testSSHConfig() *SSHConfig { return &SSHConfig{ - SSHUser: "foo", + Comm: communicator.Config{ + SSHUsername: "foo", + }, } } @@ -19,8 +23,8 @@ func TestSSHConfigPrepare(t *testing.T) { t.Fatalf("err: %#v", errs) } - if c.SSHPort != 22 { - t.Errorf("bad ssh port: %d", c.SSHPort) + if c.Comm.SSHPort != 22 { + t.Errorf("bad ssh port: %d", c.Comm.SSHPort) } } @@ -78,46 +82,14 @@ func TestSSHConfigPrepare_SSHUser(t *testing.T) { var errs []error c = testSSHConfig() - c.SSHUser = "" + c.Comm.SSHUsername = "" errs = c.Prepare(testConfigTemplate(t)) if len(errs) == 0 { t.Fatalf("should have error") } c = testSSHConfig() - c.SSHUser = "exists" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } -} - -func TestSSHConfigPrepare_SSHWaitTimeout(t *testing.T) { - var c *SSHConfig - var errs []error - - // Defaults - c = testSSHConfig() - c.RawSSHWaitTimeout = "" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } - if c.RawSSHWaitTimeout != "20m" { - t.Fatalf("bad value: %s", c.RawSSHWaitTimeout) - } - - // Test with a bad value - c = testSSHConfig() - c.RawSSHWaitTimeout = "this is not good" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) == 0 { - t.Fatal("should have error") - } - - // Test with a good one - c = testSSHConfig() - c.RawSSHWaitTimeout = "5s" + c.Comm.SSHUsername = "exists" errs = c.Prepare(testConfigTemplate(t)) if len(errs) > 0 { t.Fatalf("should not have error: %#v", errs) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 24ef3c726..9758e6bec 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -9,6 +9,7 @@ import ( "github.com/mitchellh/multistep" parallelscommon "github.com/mitchellh/packer/builder/parallels/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -245,10 +246,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: parallelscommon.SSHAddress, - SSHConfig: 
parallelscommon.SSHConfigFunc(b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: parallelscommon.SSHAddress, + SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), }, ¶llelscommon.StepUploadVersion{ Path: b.config.PrlctlVersionFile, diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index a0c7ca9cd..1ddd64202 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -3,11 +3,13 @@ package pvm import ( "errors" "fmt" + "log" + "github.com/mitchellh/multistep" parallelscommon "github.com/mitchellh/packer/builder/parallels/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" - "log" ) // Builder implements packer.Builder and builds the actual Parallels @@ -80,10 +82,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: parallelscommon.SSHAddress, - SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: parallelscommon.SSHAddress, + SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), }, ¶llelscommon.StepUploadVersion{ Path: b.config.PrlctlVersionFile, From 89af447c8cfd34dcabaf760bf35ac54239bf1731 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:47:59 -0400 Subject: [PATCH 363/956] builder/qemu: convert to helper/comm --- builder/qemu/builder.go | 69 ++++++++++++++---------------------- builder/qemu/builder_test.go | 8 ++--- builder/qemu/ssh.go | 10 +++--- 3 files changed, 33 insertions(+), 54 deletions(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index cb0ac6c88..2a1a2fe1c 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -12,7 +12,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" - commonssh "github.com/mitchellh/packer/common/ssh" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -78,6 +78,7 @@ type Builder struct { type Config struct { common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` Accelerator string `mapstructure:"accelerator"` BootCommand []string `mapstructure:"boot_command"` @@ -103,25 +104,24 @@ type Config struct { ShutdownCommand string `mapstructure:"shutdown_command"` SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` - SSHPassword string `mapstructure:"ssh_password"` - SSHPort uint `mapstructure:"ssh_port"` - SSHUser string `mapstructure:"ssh_username"` - SSHKeyPath string `mapstructure:"ssh_key_path"` VNCPortMin uint `mapstructure:"vnc_port_min"` VNCPortMax uint `mapstructure:"vnc_port_max"` VMName string `mapstructure:"vm_name"` + // These are deprecated, but we keep them around for BC + // TODO(@mitchellh): remove + SSHKeyPath string `mapstructure:"ssh_key_path"` + SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout"` + // TODO(mitchellh): deprecate RunOnce bool `mapstructure:"run_once"` RawBootWait string `mapstructure:"boot_wait"` RawSingleISOUrl string `mapstructure:"iso_url"` RawShutdownTimeout string 
`mapstructure:"shutdown_timeout"` - RawSSHWaitTimeout string `mapstructure:"ssh_wait_timeout"` bootWait time.Duration `` shutdownTimeout time.Duration `` - sshWaitTimeout time.Duration `` ctx interpolate.Context } @@ -139,9 +139,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { return nil, err } - var errs *packer.MultiError - warnings := make([]string, 0) - if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } @@ -190,10 +187,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.SSHHostPortMax = 4444 } - if b.config.SSHPort == 0 { - b.config.SSHPort = 22 - } - if b.config.VNCPortMin == 0 { b.config.VNCPortMin = 5900 } @@ -222,6 +215,21 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.DiskInterface = "virtio" } + // TODO: backwards compatibility, write fixer instead + if b.config.SSHKeyPath != "" { + b.config.Comm.SSHPrivateKey = b.config.SSHKeyPath + } + if b.config.SSHWaitTimeout != 0 { + b.config.Comm.SSHTimeout = b.config.SSHWaitTimeout + } + + var errs *packer.MultiError + warnings := make([]string, 0) + + if es := b.config.Comm.Prepare(&b.config.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) + } + if !(b.config.Format == "qcow2" || b.config.Format == "raw") { errs = packer.MultiErrorAppend( errs, errors.New("invalid format, only 'qcow2' or 'raw' are allowed")) @@ -314,42 +322,17 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.RawShutdownTimeout = "5m" } - if b.config.RawSSHWaitTimeout == "" { - b.config.RawSSHWaitTimeout = "20m" - } - b.config.shutdownTimeout, err = time.ParseDuration(b.config.RawShutdownTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing shutdown_timeout: %s", err)) } - if b.config.SSHKeyPath != "" { - if _, err := os.Stat(b.config.SSHKeyPath); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } else if _, err := commonssh.FileSigner(b.config.SSHKeyPath); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } - } - if b.config.SSHHostPortMin > b.config.SSHHostPortMax { errs = packer.MultiErrorAppend( errs, errors.New("ssh_host_port_min must be less than ssh_host_port_max")) } - if b.config.SSHUser == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("An ssh_username must be specified.")) - } - - b.config.sshWaitTimeout, err = time.ParseDuration(b.config.RawSSHWaitTimeout) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) - } - if b.config.VNCPortMin > b.config.VNCPortMax { errs = packer.MultiErrorAppend( errs, fmt.Errorf("vnc_port_min must be less than vnc_port_max")) @@ -409,10 +392,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe steprun, &stepBootWait{}, &stepTypeBootCommand{}, - &common.StepConnectSSH{ - SSHAddress: sshAddress, - SSHConfig: sshConfig, - SSHWaitTimeout: b.config.sshWaitTimeout, + &communicator.StepConnect{ + Config: &b.config.Comm, + SSHAddress: sshAddress, + SSHConfig: sshConfig, }, new(common.StepProvision), new(stepShutdown), diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go index e3415d514..84d1d40c3 100644 --- a/builder/qemu/builder_test.go +++ b/builder/qemu/builder_test.go @@ -79,8 +79,8 @@ func TestBuilderPrepare_Defaults(t *testing.T) { t.Errorf("bad max ssh host port: %d", b.config.SSHHostPortMax) } - if b.config.SSHPort != 22 { - 
t.Errorf("bad ssh port: %d", b.config.SSHPort) + if b.config.Comm.SSHPort != 22 { + t.Errorf("bad ssh port: %d", b.config.Comm.SSHPort) } if b.config.VMName != "packer-foo" { @@ -595,10 +595,6 @@ func TestBuilderPrepare_SSHWaitTimeout(t *testing.T) { t.Fatalf("err: %s", err) } - if b.config.RawSSHWaitTimeout != "20m" { - t.Fatalf("bad value: %s", b.config.RawSSHWaitTimeout) - } - // Test with a bad value config["ssh_wait_timeout"] = "this is not good" b = Builder{} diff --git a/builder/qemu/ssh.go b/builder/qemu/ssh.go index 9724d7483..17dc36de9 100644 --- a/builder/qemu/ssh.go +++ b/builder/qemu/ssh.go @@ -18,13 +18,13 @@ func sshConfig(state multistep.StateBag) (*gossh.ClientConfig, error) { config := state.Get("config").(*Config) auth := []gossh.AuthMethod{ - gossh.Password(config.SSHPassword), + gossh.Password(config.Comm.SSHPassword), gossh.KeyboardInteractive( - ssh.PasswordKeyboardInteractive(config.SSHPassword)), + ssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), } - if config.SSHKeyPath != "" { - signer, err := commonssh.FileSigner(config.SSHKeyPath) + if config.Comm.SSHPrivateKey != "" { + signer, err := commonssh.FileSigner(config.Comm.SSHPrivateKey) if err != nil { return nil, err } @@ -33,7 +33,7 @@ func sshConfig(state multistep.StateBag) (*gossh.ClientConfig, error) { } return &gossh.ClientConfig{ - User: config.SSHUser, + User: config.Comm.SSHUsername, Auth: auth, }, nil } From b61ed3adfcdcc34de3c08aea087e2b3acbb0c9c9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:52:44 -0400 Subject: [PATCH 364/956] builder/vmware: convert to helper/comm --- builder/vmware/common/ssh.go | 16 +++--- builder/vmware/common/ssh_config.go | 62 ++++++------------------ builder/vmware/common/ssh_config_test.go | 61 +++-------------------- builder/vmware/iso/builder.go | 10 ++-- builder/vmware/iso/builder_test.go | 8 +-- builder/vmware/iso/driver_esx5.go | 2 +- builder/vmware/vmx/builder.go | 10 ++-- 7 files changed, 44 insertions(+), 125 deletions(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 167bd6792..7181e90bf 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -18,8 +18,8 @@ func SSHAddressFunc(config *SSHConfig) func(multistep.StateBag) (string, error) driver := state.Get("driver").(Driver) vmxPath := state.Get("vmx_path").(string) - if config.SSHHost != "" { - return fmt.Sprintf("%s:%d", config.SSHHost, config.SSHPort), nil + if config.Comm.SSHHost != "" { + return fmt.Sprintf("%s:%d", config.Comm.SSHHost, config.Comm.SSHPort), nil } log.Println("Lookup up IP information...") @@ -62,20 +62,20 @@ func SSHAddressFunc(config *SSHConfig) func(multistep.StateBag) (string, error) } log.Printf("Detected IP: %s", ipAddress) - return fmt.Sprintf("%s:%d", ipAddress, config.SSHPort), nil + return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil } } func SSHConfigFunc(config *SSHConfig) func(multistep.StateBag) (*gossh.ClientConfig, error) { return func(state multistep.StateBag) (*gossh.ClientConfig, error) { auth := []gossh.AuthMethod{ - gossh.Password(config.SSHPassword), + gossh.Password(config.Comm.SSHPassword), gossh.KeyboardInteractive( - ssh.PasswordKeyboardInteractive(config.SSHPassword)), + ssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)), } - if config.SSHKeyPath != "" { - signer, err := commonssh.FileSigner(config.SSHKeyPath) + if config.Comm.SSHPrivateKey != "" { + signer, err := commonssh.FileSigner(config.Comm.SSHPrivateKey) if err != nil { return nil, err } @@ -84,7 
+84,7 @@ func SSHConfigFunc(config *SSHConfig) func(multistep.StateBag) (*gossh.ClientCon } return &gossh.ClientConfig{ - User: config.SSHUser, + User: config.Comm.SSHUsername, Auth: auth, }, nil } diff --git a/builder/vmware/common/ssh_config.go b/builder/vmware/common/ssh_config.go index 1bd481d92..86754f6e4 100644 --- a/builder/vmware/common/ssh_config.go +++ b/builder/vmware/common/ssh_config.go @@ -1,63 +1,33 @@ package common import ( - "errors" - "fmt" - "net" - "os" "time" - commonssh "github.com/mitchellh/packer/common/ssh" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/template/interpolate" ) type SSHConfig struct { - SSHUser string `mapstructure:"ssh_username"` - SSHKeyPath string `mapstructure:"ssh_key_path"` - SSHPassword string `mapstructure:"ssh_password"` - SSHHost string `mapstructure:"ssh_host"` - SSHPort uint `mapstructure:"ssh_port"` - SSHSkipRequestPty bool `mapstructure:"ssh_skip_request_pty"` - RawSSHWaitTimeout string `mapstructure:"ssh_wait_timeout"` + Comm communicator.Config `mapstructure:",squash"` - SSHWaitTimeout time.Duration + // These are deprecated, but we keep them around for BC + // TODO(@mitchellh): remove + SSHKeyPath string `mapstructure:"ssh_key_path"` + SSHSkipRequestPty bool `mapstructure:"ssh_skip_request_pty"` + SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout"` } func (c *SSHConfig) Prepare(ctx *interpolate.Context) []error { - if c.SSHPort == 0 { - c.SSHPort = 22 - } - - if c.RawSSHWaitTimeout == "" { - c.RawSSHWaitTimeout = "20m" - } - - var errs []error + // TODO: backwards compatibility, write fixer instead if c.SSHKeyPath != "" { - if _, err := os.Stat(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } else if _, err := commonssh.FileSigner(c.SSHKeyPath); err != nil { - errs = append(errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) - } + c.Comm.SSHPrivateKey = c.SSHKeyPath + } + if c.SSHWaitTimeout != 0 { + c.Comm.SSHTimeout = c.SSHWaitTimeout + } + if c.SSHSkipRequestPty { + c.Comm.SSHPty = false } - if c.SSHHost != "" { - if ip := net.ParseIP(c.SSHHost); ip == nil { - if _, err := net.LookupHost(c.SSHHost); err != nil { - errs = append(errs, errors.New("ssh_host is an invalid IP or hostname")) - } - } - } - - if c.SSHUser == "" { - errs = append(errs, errors.New("An ssh_username must be specified.")) - } - - var err error - c.SSHWaitTimeout, err = time.ParseDuration(c.RawSSHWaitTimeout) - if err != nil { - errs = append(errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) - } - - return errs + return c.Comm.Prepare(ctx) } diff --git a/builder/vmware/common/ssh_config_test.go b/builder/vmware/common/ssh_config_test.go index a6c9e8ef5..1fd84c02f 100644 --- a/builder/vmware/common/ssh_config_test.go +++ b/builder/vmware/common/ssh_config_test.go @@ -4,11 +4,15 @@ import ( "io/ioutil" "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func testSSHConfig() *SSHConfig { return &SSHConfig{ - SSHUser: "foo", + Comm: communicator.Config{ + SSHUsername: "foo", + }, } } @@ -19,8 +23,8 @@ func TestSSHConfigPrepare(t *testing.T) { t.Fatalf("err: %#v", errs) } - if c.SSHPort != 22 { - t.Errorf("bad ssh port: %d", c.SSHPort) + if c.Comm.SSHPort != 22 { + t.Errorf("bad ssh port: %d", c.Comm.SSHPort) } } @@ -73,57 +77,6 @@ func TestSSHConfigPrepare_SSHKeyPath(t *testing.T) { } } -func TestSSHConfigPrepare_SSHUser(t *testing.T) { - var c *SSHConfig - var errs []error - - c = testSSHConfig() - c.SSHUser = "" - errs = 
c.Prepare(testConfigTemplate(t)) - if len(errs) == 0 { - t.Fatalf("should have error") - } - - c = testSSHConfig() - c.SSHUser = "exists" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } -} - -func TestSSHConfigPrepare_SSHWaitTimeout(t *testing.T) { - var c *SSHConfig - var errs []error - - // Defaults - c = testSSHConfig() - c.RawSSHWaitTimeout = "" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } - if c.RawSSHWaitTimeout != "20m" { - t.Fatalf("bad value: %s", c.RawSSHWaitTimeout) - } - - // Test with a bad value - c = testSSHConfig() - c.RawSSHWaitTimeout = "this is not good" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) == 0 { - t.Fatal("should have error") - } - - // Test with a good one - c = testSSHConfig() - c.RawSSHWaitTimeout = "5s" - errs = c.Prepare(testConfigTemplate(t)) - if len(errs) > 0 { - t.Fatalf("should not have error: %#v", errs) - } -} - const testPem = ` -----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAxd4iamvrwRJvtNDGQSIbNvvIQN8imXTRWlRY62EvKov60vqu diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index dce0bbd2a..9599df675 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/multistep" vmwcommon "github.com/mitchellh/packer/builder/vmware/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -298,11 +299,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: driver.SSHAddress, - SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, - Pty: !b.config.SSHSkipRequestPty, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: driver.SSHAddress, + SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, diff --git a/builder/vmware/iso/builder_test.go b/builder/vmware/iso/builder_test.go index 1749b396e..60c6aeca0 100644 --- a/builder/vmware/iso/builder_test.go +++ b/builder/vmware/iso/builder_test.go @@ -1,12 +1,12 @@ package iso import ( - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "reflect" "testing" - "time" + + "github.com/mitchellh/packer/packer" ) func testConfig() map[string]interface{} { @@ -138,10 +138,6 @@ func TestBuilderPrepare_Defaults(t *testing.T) { t.Errorf("bad Version: %s", b.config.Version) } - if b.config.SSHWaitTimeout != (20 * time.Minute) { - t.Errorf("bad wait timeout: %s", b.config.SSHWaitTimeout) - } - if b.config.VMName != "packer-foo" { t.Errorf("bad vm name: %s", b.config.VMName) } diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index ed56db067..26642f3d7 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -253,7 +253,7 @@ func (d *ESX5Driver) SSHAddress(state multistep.StateBag) (string, error) { return "", errors.New("VM network port found, but no IP address") } - address := fmt.Sprintf("%s:%d", record["IPAddress"], config.SSHPort) + address := fmt.Sprintf("%s:%d", record["IPAddress"], config.Comm.SSHPort) state.Put("vm_address", address) return address, nil } diff --git 
a/builder/vmware/vmx/builder.go b/builder/vmware/vmx/builder.go index 24597f73c..1390da547 100644 --- a/builder/vmware/vmx/builder.go +++ b/builder/vmware/vmx/builder.go @@ -9,6 +9,7 @@ import ( "github.com/mitchellh/multistep" vmwcommon "github.com/mitchellh/packer/builder/vmware/common" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" ) @@ -90,11 +91,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VMName: b.config.VMName, Ctx: b.config.ctx, }, - &common.StepConnectSSH{ - SSHAddress: driver.SSHAddress, - SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), - SSHWaitTimeout: b.config.SSHWaitTimeout, - Pty: !b.config.SSHSkipRequestPty, + &communicator.StepConnect{ + Config: &b.config.SSHConfig.Comm, + SSHAddress: driver.SSHAddress, + SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, From 68e4734caf5256ad19f9fabf8d7c5b924849dead Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 18:55:37 -0400 Subject: [PATCH 365/956] builder/null: pass unit tests --- builder/null/config_test.go | 7 ++++- helper/communicator/testing.go | 47 ++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 helper/communicator/testing.go diff --git a/builder/null/config_test.go b/builder/null/config_test.go index 12123378d..a3b96421e 100644 --- a/builder/null/config_test.go +++ b/builder/null/config_test.go @@ -1,7 +1,10 @@ package null import ( + "os" "testing" + + "github.com/mitchellh/packer/helper/communicator" ) func testConfig() map[string]interface{} { @@ -97,7 +100,9 @@ func TestConfigPrepare_sshCredential(t *testing.T) { testConfigOk(t, warns, errs) // only ssh_private_key_file - raw["ssh_private_key_file"] = "good" + testFile := communicator.TestPEM(t) + defer os.Remove(testFile) + raw["ssh_private_key_file"] = testFile delete(raw, "ssh_password") _, warns, errs = NewConfig(raw) testConfigOk(t, warns, errs) diff --git a/helper/communicator/testing.go b/helper/communicator/testing.go new file mode 100644 index 000000000..1c89f15e3 --- /dev/null +++ b/helper/communicator/testing.go @@ -0,0 +1,47 @@ +package communicator + +import ( + "io/ioutil" + "testing" +) + +func TestPEM(t *testing.T) string { + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("err: %s", err) + } + tf.Write([]byte(TestPEMContents)) + tf.Close() + + return tf.Name() +} + +const TestPEMContents = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAxd4iamvrwRJvtNDGQSIbNvvIQN8imXTRWlRY62EvKov60vqu +hh+rDzFYAIIzlmrJopvOe0clqmi3mIP9dtkjPFrYflq52a2CF5q+BdwsJXuRHbJW +LmStZUwW1khSz93DhvhmK50nIaczW63u4EO/jJb3xj+wxR1Nkk9bxi3DDsYFt8SN +AzYx9kjlEYQ/+sI4/ATfmdV9h78SVotjScupd9KFzzi76gWq9gwyCBLRynTUWlyD +2UOfJRkOvhN6/jKzvYfVVwjPSfA9IMuooHdScmC4F6KBKJl/zf/zETM0XyzIDNmH +uOPbCiljq2WoRM+rY6ET84EO0kVXbfx8uxUsqQIDAQABAoIBAQCkPj9TF0IagbM3 +5BSs/CKbAWS4dH/D4bPlxx4IRCNirc8GUg+MRb04Xz0tLuajdQDqeWpr6iLZ0RKV +BvreLF+TOdV7DNQ4XE4gSdJyCtCaTHeort/aordL3l0WgfI7mVk0L/yfN1PEG4YG +E9q1TYcyrB3/8d5JwIkjabxERLglCcP+geOEJp+QijbvFIaZR/n2irlKW4gSy6ko +9B0fgUnhkHysSg49ChHQBPQ+o5BbpuLrPDFMiTPTPhdfsvGGcyCGeqfBA56oHcSF +K02Fg8OM+Bd1lb48LAN9nWWY4WbwV+9bkN3Ym8hO4c3a/Dxf2N7LtAQqWZzFjvM3 +/AaDvAgBAoGBAPLD+Xn1IYQPMB2XXCXfOuJewRY7RzoVWvMffJPDfm16O7wOiW5+ +2FmvxUDayk4PZy6wQMzGeGKnhcMMZTyaq2g/QtGfrvy7q1Lw2fB1VFlVblvqhoJa +nMJojjC4zgjBkXMHsRLeTmgUKyGs+fdFbfI6uejBnnf+eMVUMIdJ+6I9AoGBANCn 
+kWO9640dttyXURxNJ3lBr2H3dJOkmD6XS+u+LWqCSKQe691Y/fZ/ZL0Oc4Mhy7I6 +hsy3kDQ5k2V0fkaNODQIFJvUqXw2pMewUk8hHc9403f4fe9cPrL12rQ8WlQw4yoC +v2B61vNczCCUDtGxlAaw8jzSRaSI5s6ax3K7enbdAoGBAJB1WYDfA2CoAQO6y9Sl +b07A/7kQ8SN5DbPaqrDrBdJziBQxukoMJQXJeGFNUFD/DXFU5Fp2R7C86vXT7HIR +v6m66zH+CYzOx/YE6EsUJms6UP9VIVF0Rg/RU7teXQwM01ZV32LQ8mswhTH20o/3 +uqMHmxUMEhZpUMhrfq0isyApAoGAe1UxGTXfj9AqkIVYylPIq2HqGww7+jFmVEj1 +9Wi6S6Sq72ffnzzFEPkIQL/UA4TsdHMnzsYKFPSbbXLIWUeMGyVTmTDA5c0e5XIR +lPhMOKCAzv8w4VUzMnEkTzkFY5JqFCD/ojW57KvDdNZPVB+VEcdxyAW6aKELXMAc +eHLc1nkCgYEApm/motCTPN32nINZ+Vvywbv64ZD+gtpeMNP3CLrbe1X9O+H52AXa +1jCoOldWR8i2bs2NVPcKZgdo6fFULqE4dBX7Te/uYEIuuZhYLNzRO1IKU/YaqsXG +3bfQ8hKYcSnTfE0gPtLDnqCIxTocaGLSHeG3TH9fTw+dA8FvWpUztI4= +-----END RSA PRIVATE KEY----- +` From 115d583cff475b870028ded5d8374f4d881613e9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:23:33 -0400 Subject: [PATCH 366/956] helper/communicator: make host more generic --- builder/amazon/common/ssh.go | 6 +++--- builder/amazon/ebs/builder.go | 3 +-- builder/amazon/instance/builder.go | 3 +-- builder/digitalocean/builder.go | 6 +++--- builder/digitalocean/ssh.go | 8 ++++---- builder/googlecompute/builder.go | 6 +++--- builder/googlecompute/ssh.go | 7 +++---- builder/null/builder.go | 3 +-- builder/null/ssh.go | 6 ++---- builder/openstack/builder.go | 5 ++--- builder/openstack/ssh.go | 13 ++++++------- builder/parallels/common/ssh.go | 6 ++---- builder/parallels/iso/builder.go | 6 +++--- builder/parallels/pvm/builder.go | 6 +++--- builder/qemu/builder.go | 7 ++++--- builder/qemu/ssh.go | 10 ++++++---- builder/virtualbox/common/ssh.go | 10 ++++++---- builder/virtualbox/iso/builder.go | 7 ++++--- builder/virtualbox/ovf/builder.go | 7 ++++--- builder/vmware/common/driver.go | 4 ++-- builder/vmware/common/driver_fusion5.go | 4 ++-- builder/vmware/common/driver_mock.go | 16 ++++++++-------- builder/vmware/common/driver_player5.go | 4 ++-- builder/vmware/common/driver_workstation9.go | 4 ++-- builder/vmware/common/ssh.go | 6 +++--- builder/vmware/iso/builder.go | 6 +++--- builder/vmware/iso/driver_esx5.go | 4 ++-- builder/vmware/vmx/builder.go | 6 +++--- helper/communicator/step_connect.go | 18 ++++++++++-------- helper/communicator/step_connect_ssh.go | 19 +++++++++++++++---- 30 files changed, 113 insertions(+), 103 deletions(-) diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index 302a90beb..cf644eb25 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -10,9 +10,9 @@ import ( "golang.org/x/crypto/ssh" ) -// SSHAddress returns a function that can be given to the SSH communicator +// SSHHost returns a function that can be given to the SSH communicator // for determining the SSH address based on the instance DNS name. 
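// From this patch on, host and port are resolved separately: helpers like
// SSHHost return only the hostname or IP, and communicator.StepConnect is
// left to combine it with the port from the shared Config when it dials.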
-func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (string, error) { +func SSHHost(e *ec2.EC2, private bool) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { for j := 0; j < 2; j++ { var host string @@ -28,7 +28,7 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (st } if host != "" { - return fmt.Sprintf("%s:%d", host, port), nil + return host, nil } r, err := e.DescribeInstances(&ec2.DescribeInstancesInput{ diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index a9adcf208..cd3cd8f05 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -115,9 +115,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &communicator.StepConnect{ Config: &b.config.RunConfig.Comm, - SSHAddress: awscommon.SSHAddress( + Host: awscommon.SSHHost( ec2conn, - b.config.RunConfig.Comm.SSHPort, b.config.SSHPrivateIp), SSHConfig: awscommon.SSHConfig( b.config.RunConfig.Comm.SSHUsername), diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 09bda686a..d26cc63e3 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -200,9 +200,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &communicator.StepConnect{ Config: &b.config.RunConfig.Comm, - SSHAddress: awscommon.SSHAddress( + Host: awscommon.SSHHost( ec2conn, - b.config.RunConfig.Comm.SSHPort, b.config.SSHPrivateIp), SSHConfig: awscommon.SSHConfig( b.config.RunConfig.Comm.SSHUsername), diff --git a/builder/digitalocean/builder.go b/builder/digitalocean/builder.go index d5f1b7a83..ec57ed2b2 100644 --- a/builder/digitalocean/builder.go +++ b/builder/digitalocean/builder.go @@ -54,9 +54,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe new(stepCreateDroplet), new(stepDropletInfo), &communicator.StepConnect{ - Config: &b.config.Comm, - SSHAddress: sshAddress, - SSHConfig: sshConfig, + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig, }, new(common.StepProvision), new(stepShutdown), diff --git a/builder/digitalocean/ssh.go b/builder/digitalocean/ssh.go index 4d8d1f08c..5367dde1f 100644 --- a/builder/digitalocean/ssh.go +++ b/builder/digitalocean/ssh.go @@ -2,14 +2,14 @@ package digitalocean import ( "fmt" - "github.com/mitchellh/multistep" "golang.org/x/crypto/ssh" + + "github.com/mitchellh/multistep" ) -func sshAddress(state multistep.StateBag) (string, error) { - config := state.Get("config").(Config) +func commHost(state multistep.StateBag) (string, error) { ipAddress := state.Get("droplet_ip").(string) - return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil + return ipAddress, nil } func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) { diff --git a/builder/googlecompute/builder.go b/builder/googlecompute/builder.go index 3ed576b50..0f6e4bd2f 100644 --- a/builder/googlecompute/builder.go +++ b/builder/googlecompute/builder.go @@ -62,9 +62,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Debug: b.config.PackerDebug, }, &communicator.StepConnect{ - Config: &b.config.Comm, - SSHAddress: sshAddress, - SSHConfig: sshConfig, + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig, }, new(common.StepProvision), new(StepTeardownInstance), diff --git a/builder/googlecompute/ssh.go b/builder/googlecompute/ssh.go index 446648884..5b7940591 100644 --- 
a/builder/googlecompute/ssh.go +++ b/builder/googlecompute/ssh.go @@ -2,15 +2,14 @@ package googlecompute import ( "fmt" + "github.com/mitchellh/multistep" "golang.org/x/crypto/ssh" ) -// sshAddress returns the ssh address. -func sshAddress(state multistep.StateBag) (string, error) { - config := state.Get("config").(*Config) +func commHost(state multistep.StateBag) (string, error) { ipAddress := state.Get("instance_ip").(string) - return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil + return ipAddress, nil } // sshConfig returns the ssh configuration. diff --git a/builder/null/builder.go b/builder/null/builder.go index 925075ee0..fed303540 100644 --- a/builder/null/builder.go +++ b/builder/null/builder.go @@ -30,8 +30,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe steps := []multistep.Step{ &communicator.StepConnect{ Config: &b.config.CommConfig, - SSHAddress: SSHAddress( - b.config.CommConfig.SSHHost, b.config.CommConfig.SSHPort), + Host: CommHost(b.config.CommConfig.SSHHost), SSHConfig: SSHConfig( b.config.CommConfig.SSHUsername, b.config.CommConfig.SSHPassword, diff --git a/builder/null/ssh.go b/builder/null/ssh.go index e6ac9ab16..483390e86 100644 --- a/builder/null/ssh.go +++ b/builder/null/ssh.go @@ -8,11 +8,9 @@ import ( "io/ioutil" ) -// SSHAddress returns a function that can be given to the SSH communicator -// for determining the SSH address -func SSHAddress(host string, port int) func(multistep.StateBag) (string, error) { +func CommHost(host string) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { - return fmt.Sprintf("%s:%d", host, port), nil + return host, nil } } diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index 6d178c6ef..69ab6a016 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -92,10 +92,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &communicator.StepConnect{ Config: &b.config.RunConfig.Comm, - SSHAddress: SSHAddress( + Host: CommHost( computeClient, - b.config.SSHInterface, - b.config.RunConfig.Comm.SSHPort), + b.config.SSHInterface), SSHConfig: SSHConfig(b.config.RunConfig.Comm.SSHUsername), }, &common.StepProvision{}, diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 76c2686b1..dc4c91771 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -13,22 +13,21 @@ import ( "golang.org/x/crypto/ssh" ) -// SSHAddress returns a function that can be given to the SSH communicator -// for determining the SSH address based on the server AccessIPv4 setting.. -func SSHAddress( +// CommHost looks up the host for the communicator. +func CommHost( client *gophercloud.ServiceClient, - sshinterface string, port int) func(multistep.StateBag) (string, error) { + sshinterface string) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { s := state.Get("server").(*servers.Server) // If we have a floating IP, use that ip := state.Get("access_ip").(*floatingip.FloatingIP) if ip != nil && ip.IP != "" { - return fmt.Sprintf("%s:%d", ip.IP, port), nil + return ip.IP, nil } if s.AccessIPv4 != "" { - return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil + return s.AccessIPv4, nil } // Get all the addresses associated with this server. 
This @@ -53,7 +52,7 @@ func SSHAddress( } } if addr != "" { - return fmt.Sprintf("%s:%d", addr, port), nil + return addr, nil } } } diff --git a/builder/parallels/common/ssh.go b/builder/parallels/common/ssh.go index 827124677..9e0b2b907 100644 --- a/builder/parallels/common/ssh.go +++ b/builder/parallels/common/ssh.go @@ -1,15 +1,13 @@ package common import ( - "fmt" - "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" packerssh "github.com/mitchellh/packer/communicator/ssh" "golang.org/x/crypto/ssh" ) -func SSHAddress(state multistep.StateBag) (string, error) { +func CommHost(state multistep.StateBag) (string, error) { vmName := state.Get("vmName").(string) driver := state.Get("driver").(Driver) @@ -23,7 +21,7 @@ func SSHAddress(state multistep.StateBag) (string, error) { return "", err } - return fmt.Sprintf("%s:22", ip), nil + return ip, nil } func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*ssh.ClientConfig, error) { diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 9758e6bec..46fa73687 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -247,9 +247,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: parallelscommon.SSHAddress, - SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: parallelscommon.CommHost, + SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), }, ¶llelscommon.StepUploadVersion{ Path: b.config.PrlctlVersionFile, diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index 1ddd64202..0e71ea0f2 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -83,9 +83,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: parallelscommon.SSHAddress, - SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: parallelscommon.CommHost, + SSHConfig: parallelscommon.SSHConfigFunc(b.config.SSHConfig), }, ¶llelscommon.StepUploadVersion{ Path: b.config.PrlctlVersionFile, diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 2a1a2fe1c..9df908989 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -393,9 +393,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &stepBootWait{}, &stepTypeBootCommand{}, &communicator.StepConnect{ - Config: &b.config.Comm, - SSHAddress: sshAddress, - SSHConfig: sshConfig, + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig, + SSHPort: commPort, }, new(common.StepProvision), new(stepShutdown), diff --git a/builder/qemu/ssh.go b/builder/qemu/ssh.go index 17dc36de9..498d3fbe9 100644 --- a/builder/qemu/ssh.go +++ b/builder/qemu/ssh.go @@ -1,17 +1,19 @@ package qemu import ( - "fmt" - "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" gossh "golang.org/x/crypto/ssh" ) -func sshAddress(state multistep.StateBag) (string, error) { +func commHost(state multistep.StateBag) (string, error) { + return "127.0.0.1", nil +} + +func commPort(state multistep.StateBag) (int, error) { sshHostPort := state.Get("sshHostPort").(uint) - return fmt.Sprintf("127.0.0.1:%d", 
sshHostPort), nil + return int(sshHostPort), nil } func sshConfig(state multistep.StateBag) (*gossh.ClientConfig, error) { diff --git a/builder/virtualbox/common/ssh.go b/builder/virtualbox/common/ssh.go index c20ac2836..2584528dd 100644 --- a/builder/virtualbox/common/ssh.go +++ b/builder/virtualbox/common/ssh.go @@ -1,17 +1,19 @@ package common import ( - "fmt" - "github.com/mitchellh/multistep" commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" gossh "golang.org/x/crypto/ssh" ) -func SSHAddress(state multistep.StateBag) (string, error) { +func CommHost(state multistep.StateBag) (string, error) { + return "127.0.0.1", nil +} + +func SSHPort(state multistep.StateBag) (int, error) { sshHostPort := state.Get("sshHostPort").(uint) - return fmt.Sprintf("127.0.0.1:%d", sshHostPort), nil + return int(sshHostPort), nil } func SSHConfigFunc(config SSHConfig) func(multistep.StateBag) (*gossh.ClientConfig, error) { diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 226de0527..f37c65c5a 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -273,9 +273,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: vboxcommon.SSHAddress, - SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: vboxcommon.CommHost, + SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), + SSHPort: vboxcommon.SSHPort, }, &vboxcommon.StepUploadVersion{ Path: b.config.VBoxVersionFile, diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index 05ec6159f..c35f2a50f 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -102,9 +102,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: vboxcommon.SSHAddress, - SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: vboxcommon.CommHost, + SSHConfig: vboxcommon.SSHConfigFunc(b.config.SSHConfig), + SSHPort: vboxcommon.SSHPort, }, &vboxcommon.StepUploadVersion{ Path: b.config.VBoxVersionFile, diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index ee8dbc30e..c06c8fd92 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -29,9 +29,9 @@ type Driver interface { // Checks if the VMX file at the given path is running. IsRunning(string) (bool, error) - // SSHAddress returns the SSH address for the VM that is being + // CommHost returns the host address for the VM that is being // managed by this driver. - SSHAddress(multistep.StateBag) (string, error) + CommHost(multistep.StateBag) (string, error) // Start starts a VM specified by the path to the VMX given. 
Start(string, bool) error diff --git a/builder/vmware/common/driver_fusion5.go b/builder/vmware/common/driver_fusion5.go index 3a295e731..a10f11902 100644 --- a/builder/vmware/common/driver_fusion5.go +++ b/builder/vmware/common/driver_fusion5.go @@ -69,8 +69,8 @@ func (d *Fusion5Driver) IsRunning(vmxPath string) (bool, error) { return false, nil } -func (d *Fusion5Driver) SSHAddress(state multistep.StateBag) (string, error) { - return SSHAddressFunc(d.SSHConfig)(state) +func (d *Fusion5Driver) CommHost(state multistep.StateBag) (string, error) { + return CommHost(d.SSHConfig)(state) } func (d *Fusion5Driver) Start(vmxPath string, headless bool) error { diff --git a/builder/vmware/common/driver_mock.go b/builder/vmware/common/driver_mock.go index 6aa0d02c0..fcd80a51b 100644 --- a/builder/vmware/common/driver_mock.go +++ b/builder/vmware/common/driver_mock.go @@ -29,10 +29,10 @@ type DriverMock struct { IsRunningResult bool IsRunningErr error - SSHAddressCalled bool - SSHAddressState multistep.StateBag - SSHAddressResult string - SSHAddressErr error + CommHostCalled bool + CommHostState multistep.StateBag + CommHostResult string + CommHostErr error StartCalled bool StartPath string @@ -92,10 +92,10 @@ func (d *DriverMock) IsRunning(path string) (bool, error) { return d.IsRunningResult, d.IsRunningErr } -func (d *DriverMock) SSHAddress(state multistep.StateBag) (string, error) { - d.SSHAddressCalled = true - d.SSHAddressState = state - return d.SSHAddressResult, d.SSHAddressErr +func (d *DriverMock) CommHost(state multistep.StateBag) (string, error) { + d.CommHostCalled = true + d.CommHostState = state + return d.CommHostResult, d.CommHostErr } func (d *DriverMock) Start(path string, headless bool) error { diff --git a/builder/vmware/common/driver_player5.go b/builder/vmware/common/driver_player5.go index 5bb80a0f2..1552e92ea 100644 --- a/builder/vmware/common/driver_player5.go +++ b/builder/vmware/common/driver_player5.go @@ -97,8 +97,8 @@ func (d *Player5Driver) IsRunning(vmxPath string) (bool, error) { return false, nil } -func (d *Player5Driver) SSHAddress(state multistep.StateBag) (string, error) { - return SSHAddressFunc(d.SSHConfig)(state) +func (d *Player5Driver) CommHost(state multistep.StateBag) (string, error) { + return CommHost(d.SSHConfig)(state) } func (d *Player5Driver) Start(vmxPath string, headless bool) error { diff --git a/builder/vmware/common/driver_workstation9.go b/builder/vmware/common/driver_workstation9.go index 4c72c72b3..debcefcbc 100644 --- a/builder/vmware/common/driver_workstation9.go +++ b/builder/vmware/common/driver_workstation9.go @@ -70,8 +70,8 @@ func (d *Workstation9Driver) IsRunning(vmxPath string) (bool, error) { return false, nil } -func (d *Workstation9Driver) SSHAddress(state multistep.StateBag) (string, error) { - return SSHAddressFunc(d.SSHConfig)(state) +func (d *Workstation9Driver) CommHost(state multistep.StateBag) (string, error) { + return CommHost(d.SSHConfig)(state) } func (d *Workstation9Driver) Start(vmxPath string, headless bool) error { diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 7181e90bf..86e184bb5 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -13,13 +13,13 @@ import ( gossh "golang.org/x/crypto/ssh" ) -func SSHAddressFunc(config *SSHConfig) func(multistep.StateBag) (string, error) { +func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { return func(state multistep.StateBag) (string, error) { driver := state.Get("driver").(Driver) vmxPath := 
state.Get("vmx_path").(string) if config.Comm.SSHHost != "" { - return fmt.Sprintf("%s:%d", config.Comm.SSHHost, config.Comm.SSHPort), nil + return config.Comm.SSHHost, nil } log.Println("Lookup up IP information...") @@ -62,7 +62,7 @@ func SSHAddressFunc(config *SSHConfig) func(multistep.StateBag) (string, error) } log.Printf("Detected IP: %s", ipAddress) - return fmt.Sprintf("%s:%d", ipAddress, config.Comm.SSHPort), nil + return ipAddress, nil } } diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 9599df675..38ba3a4a1 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -300,9 +300,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: driver.SSHAddress, - SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: driver.CommHost, + SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 26642f3d7..8162db468 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -218,7 +218,7 @@ func (d *ESX5Driver) VNCAddress(portMin, portMax uint) (string, uint, error) { return d.Host, vncPort, nil } -func (d *ESX5Driver) SSHAddress(state multistep.StateBag) (string, error) { +func (d *ESX5Driver) CommHost(state multistep.StateBag) (string, error) { config := state.Get("config").(*Config) if address, ok := state.GetOk("vm_address"); ok { @@ -253,7 +253,7 @@ func (d *ESX5Driver) SSHAddress(state multistep.StateBag) (string, error) { return "", errors.New("VM network port found, but no IP address") } - address := fmt.Sprintf("%s:%d", record["IPAddress"], config.Comm.SSHPort) + address := record["IPAddress"] state.Put("vm_address", address) return address, nil } diff --git a/builder/vmware/vmx/builder.go b/builder/vmware/vmx/builder.go index 1390da547..aa86d3669 100644 --- a/builder/vmware/vmx/builder.go +++ b/builder/vmware/vmx/builder.go @@ -92,9 +92,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Ctx: b.config.ctx, }, &communicator.StepConnect{ - Config: &b.config.SSHConfig.Comm, - SSHAddress: driver.SSHAddress, - SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), + Config: &b.config.SSHConfig.Comm, + Host: driver.CommHost, + SSHConfig: vmwcommon.SSHConfigFunc(&b.config.SSHConfig), }, &vmwcommon.StepUploadTools{ RemoteType: b.config.RemoteType, diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go index e6338027e..a31dd4eb8 100644 --- a/helper/communicator/step_connect.go +++ b/helper/communicator/step_connect.go @@ -15,15 +15,16 @@ type StepConnect struct { // Config is the communicator config struct Config *Config + // Host should return a host that can be connected to for communicator + // connections. + Host func(multistep.StateBag) (string, error) + // The fields below are callbacks to assist with connecting to SSH. // - // SSHAddress should return the default host to connect to for SSH. - // This is only called if ssh_host isn't specified in the config. - // // SSHConfig should return the default configuration for // connecting via SSH. 
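+	//
+	// SSHPort is optional: when it is nil, the ssh_port value from the
+	// communicator config is used instead (see waitForSSH in
+	// step_connect_ssh.go below).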
- SSHAddress func(multistep.StateBag) (string, error) - SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + SSHPort func(multistep.StateBag) (int, error) substep multistep.Step } @@ -32,9 +33,10 @@ func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { typeMap := map[string]multistep.Step{ "none": nil, "ssh": &StepConnectSSH{ - Config: s.Config, - SSHAddress: s.SSHAddress, - SSHConfig: s.SSHConfig, + Config: s.Config, + Host: s.Host, + SSHConfig: s.SSHConfig, + SSHPort: s.SSHPort, }, } diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index 9be653c01..0b54bae9d 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -18,9 +18,10 @@ import ( // In general, you should use StepConnect. type StepConnectSSH struct { // All the fields below are documented on StepConnect - Config *Config - SSHAddress func(multistep.StateBag) (string, error) - SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + Config *Config + Host func(multistep.StateBag) (string, error) + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + SSHPort func(multistep.StateBag) (int, error) } func (s *StepConnectSSH) Run(state multistep.StateBag) multistep.StepAction { @@ -95,11 +96,19 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru first = false // First we request the TCP connection information - address, err := s.SSHAddress(state) + host, err := s.Host(state) if err != nil { log.Printf("[DEBUG] Error getting SSH address: %s", err) continue } + port := s.Config.SSHPort + if s.SSHPort != nil { + port, err = s.SSHPort(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH port: %s", err) + continue + } + } // Retrieve the SSH configuration sshConfig, err := s.SSHConfig(state) @@ -108,6 +117,8 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru continue } + address := fmt.Sprintf("%s:%d", host, port) + // Attempt to connect to SSH port connFunc := ssh.ConnectFunc("tcp", address) nc, err := connFunc() From c3cc9e844e7a036f7939c792353539e441d3244c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:24:57 -0400 Subject: [PATCH 367/956] helper/communicator: fix vet --- helper/communicator/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/communicator/config_test.go b/helper/communicator/config_test.go index 029c9fe35..dc1bd965d 100644 --- a/helper/communicator/config_test.go +++ b/helper/communicator/config_test.go @@ -19,7 +19,7 @@ func TestConfigType(t *testing.T) { } if c.Type != "ssh" { - t.Fatal("bad: %#v", c) + t.Fatalf("bad: %#v", c) } } From ea86cb4c7e6c3ca3e8f894d8277ce91bc478b8bf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:30:16 -0400 Subject: [PATCH 368/956] builder/qemu: use proper ssh port [GH-2074] --- builder/qemu/step_run.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 4f64c4d8b..816a3d3d3 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -80,7 +80,8 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error defaultArgs["-name"] = vmName defaultArgs["-machine"] = fmt.Sprintf("type=%s", config.MachineType) - defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:22", sshHostPort) + 
defaultArgs["-netdev"] = fmt.Sprintf( + "user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.SSHPort) defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) if !config.DiskImage { From 8d0904e296065a2951ef0856d2ce9b5e9c4b6c7e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:39:42 -0400 Subject: [PATCH 369/956] helper/communicator: configurable handshake attempts [GH-1988] --- helper/communicator/config.go | 21 +++++++++++++-------- helper/communicator/step_connect_ssh.go | 6 ++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index a2d93a480..6a5e7b97b 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -12,14 +12,15 @@ import ( // Config is the common configuration that communicators allow within // a builder. type Config struct { - Type string `mapstructure:"communicator"` - SSHHost string `mapstructure:"ssh_host"` - SSHPort int `mapstructure:"ssh_port"` - SSHUsername string `mapstructure:"ssh_username"` - SSHPassword string `mapstructure:"ssh_password"` - SSHPrivateKey string `mapstructure:"ssh_private_key_file"` - SSHPty bool `mapstructure:"ssh_pty"` - SSHTimeout time.Duration `mapstructure:"ssh_timeout"` + Type string `mapstructure:"communicator"` + SSHHost string `mapstructure:"ssh_host"` + SSHPort int `mapstructure:"ssh_port"` + SSHUsername string `mapstructure:"ssh_username"` + SSHPassword string `mapstructure:"ssh_password"` + SSHPrivateKey string `mapstructure:"ssh_private_key_file"` + SSHPty bool `mapstructure:"ssh_pty"` + SSHTimeout time.Duration `mapstructure:"ssh_timeout"` + SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"` } func (c *Config) Prepare(ctx *interpolate.Context) []error { @@ -35,6 +36,10 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { c.SSHTimeout = 5 * time.Minute } + if c.SSHHandshakeAttempts == 0 { + c.SSHHandshakeAttempts = 10 + } + // Validation var errs []error if c.Type == "ssh" { diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index 0b54bae9d..4b664fe4c 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -149,8 +149,10 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru handshakeAttempts += 1 } - if handshakeAttempts < 10 { - // Try to connect via SSH a handful of times + if handshakeAttempts < s.Config.SSHHandshakeAttempts { + // Try to connect via SSH a handful of times. We sleep here + // so we don't get a ton of authentication errors back to back. + time.Sleep(2 * time.Second) continue } From 74b9da5b23bef88dae9ca5918aabc764da630766 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:48:25 -0400 Subject: [PATCH 370/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c933040c1..4391393d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ FEATURES: * **New config function: `template_dir`**: The directory to the template being built. This should be used for template-relative paths. [GH-54] + * **Disable SSH:** Set `communicator` to "none" in any builder to disable SSH + connections. Note that provisioners won't work if this is done. 
[GH-1591] IMPROVEMENTS: From b2f8eb68e8f431f803531a87821a3238faf821dc Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sat, 13 Jun 2015 17:15:49 -0700 Subject: [PATCH 371/956] Enable ssh agent forwarding #1066 --- CHANGELOG.md | 2 + communicator/ssh/communicator.go | 50 +++++++++++++++++++ .../docs/provisioners/shell.html.markdown | 27 ++++++++++ 3 files changed, 79 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4391393d0..9e26c7818 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ FEATURES: being built. This should be used for template-relative paths. [GH-54] * **Disable SSH:** Set `communicator` to "none" in any builder to disable SSH connections. Note that provisioners won't work if this is done. [GH-1591] + * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled + to allow access to remote servers such as private git repos. [GH-1066] IMPROVEMENTS: diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 217afc940..193ecae42 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/mitchellh/packer/packer" "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" "io" "io/ioutil" "log" @@ -226,10 +227,59 @@ func (c *comm) reconnect() (err error) { if sshConn != nil { c.client = ssh.NewClient(sshConn, sshChan, req) } + c.connectToAgent() return } +func (c *comm) connectToAgent() { + if c.client == nil { + return + } + + // open connection to the local agent + socketLocation := os.Getenv("SSH_AUTH_SOCK") + if socketLocation == "" { + log.Printf("no local agent socket") + return + } + agentConn, err := net.Dial("unix", socketLocation) + if err != nil { + log.Printf("could not connect to local agent socket: %s", socketLocation) + return + } + + // create agent and add in auth + forwardingAgent := agent.NewClient(agentConn) + if forwardingAgent == nil { + log.Printf("could not create agent client") + agentConn.Close() + return + } + + // add callback for forwarding agent to SSH config + // XXX - might want to handle reconnects appending multiple callbacks + auth := ssh.PublicKeysCallback(forwardingAgent.Signers) + c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth) + agent.ForwardToAgent(c.client, forwardingAgent) + + // Setup a session to request agent forwarding + session, err := c.newSession() + if err != nil { + return + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + if err != nil { + log.Printf("RequestAgentForwarding:", err) + return + } + + log.Printf("agent forwarding enabled") + return +} + func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { session, err := c.newSession() if err != nil { diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index e57910cb0..89a442a83 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -146,6 +146,33 @@ on reboot or in your shell script. For example, on Gentoo: /etc/init.d/net.eth0 stop ``` +## SSH Agent Forwarding + +Some provisioning requires connecting to remote SSH servers from within the +packer instance. The below example is for pulling code from a private git +repository utilizing openssh on the client. Make sure you are running +`ssh-agent` and add your git repo ssh keys into it using `ssh-add /path/to/key`. 
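+For example, assuming your key lives at the hypothetical path
+`~/.ssh/id_rsa`:
+
+```
+eval $(ssh-agent)
+ssh-add ~/.ssh/id_rsa
+```
+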
+When the packer instance needs access to the ssh keys the agent will forward +the request back to your `ssh-agent`. + +Note: when provisioning via git you should add the git server keys into +the `~/.ssh/known_hosts` file otherwise the git command could hang awaiting +input. This can be done by copying the file in via the +[file provisioner](/docs/provisioners/file.html) (more secure) +or using `ssh-keyscan` to populate the file (less secure). An example of the +latter accessing github would be: + +``` +{ + "type": "shell", + "inline": [ + "sudo apt-get install -y git", + "ssh-keyscan github.com >> ~/.ssh/known_hosts", + "git clone git@github.com:exampleorg/myprivaterepo.git" + ] +} +``` + ## Troubleshooting *My shell script doesn't work correctly on Ubuntu* From acf31c31a13bff4c35aa15f12f194119ad5c317d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 19:00:28 -0700 Subject: [PATCH 372/956] communicator/ssh: update logging --- communicator/ssh/communicator.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 193ecae42..8fd9ba91e 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -240,19 +240,19 @@ func (c *comm) connectToAgent() { // open connection to the local agent socketLocation := os.Getenv("SSH_AUTH_SOCK") if socketLocation == "" { - log.Printf("no local agent socket") + log.Printf("[INFO] no local agent socket, will not connect agent") return } agentConn, err := net.Dial("unix", socketLocation) if err != nil { - log.Printf("could not connect to local agent socket: %s", socketLocation) + log.Printf("[ERROR] could not connect to local agent socket: %s", socketLocation) return } // create agent and add in auth forwardingAgent := agent.NewClient(agentConn) if forwardingAgent == nil { - log.Printf("could not create agent client") + log.Printf("[ERROR] Could not create agent client") agentConn.Close() return } @@ -272,11 +272,11 @@ func (c *comm) connectToAgent() { err = agent.RequestAgentForwarding(session) if err != nil { - log.Printf("RequestAgentForwarding:", err) + log.Printf("[ERROR] RequestAgentForwarding: %#v", err) return } - log.Printf("agent forwarding enabled") + log.Printf("[INFO] agent forwarding enabled") return } From 0c0f876654ef15e8157677f24b5028bd51fb3cc4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:00:40 -0700 Subject: [PATCH 373/956] communicator/winrm --- communicator/winrm/communicator.go | 129 ++++++++++++++++++++++++ communicator/winrm/communicator_test.go | 94 +++++++++++++++++ communicator/winrm/config.go | 14 +++ communicator/winrm/time.go | 32 ++++++ communicator/winrm/time_test.go | 36 +++++++ 5 files changed, 305 insertions(+) create mode 100644 communicator/winrm/communicator.go create mode 100644 communicator/winrm/communicator_test.go create mode 100644 communicator/winrm/config.go create mode 100644 communicator/winrm/time.go create mode 100644 communicator/winrm/time_test.go diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go new file mode 100644 index 000000000..d0b7eb76c --- /dev/null +++ b/communicator/winrm/communicator.go @@ -0,0 +1,129 @@ +package winrm + +import ( + "fmt" + "io" + "log" + + "github.com/masterzen/winrm/winrm" + "github.com/mitchellh/packer/packer" + "github.com/packer-community/winrmcp/winrmcp" + + // This import is a bit strange, but it's needed so `make updatedeps` + // can see and download it + _ 
"github.com/dylanmei/winrmtest" +) + +// Communicator represents the WinRM communicator +type Communicator struct { + config *Config + client *winrm.Client + endpoint *winrm.Endpoint +} + +// New creates a new communicator implementation over WinRM. +func New(config *Config) (*Communicator, error) { + endpoint := &winrm.Endpoint{ + Host: config.Host, + Port: config.Port, + + /* + TODO + HTTPS: connInfo.HTTPS, + Insecure: connInfo.Insecure, + CACert: connInfo.CACert, + */ + } + + // Create the client + params := winrm.DefaultParameters() + params.Timeout = formatDuration(config.Timeout) + client, err := winrm.NewClientWithParameters( + endpoint, config.Username, config.Password, params) + if err != nil { + return nil, err + } + + // Create the shell to verify the connection + log.Printf("[DEBUG] connecting to remote shell using WinRM") + shell, err := client.CreateShell() + if err != nil { + log.Printf("[ERROR] connection error: %s", err) + return nil, err + } + + if err := shell.Close(); err != nil { + log.Printf("[ERROR] error closing connection: %s", err) + return nil, err + } + + return &Communicator{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(rc *packer.RemoteCmd) error { + shell, err := c.client.CreateShell() + if err != nil { + return err + } + + log.Printf("[INFO] starting remote command: %s", rc.Command) + cmd, err := shell.Execute(rc.Command) + if err != nil { + return err + } + + go runCommand(shell, cmd, rc) + return nil +} + +func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { + defer shell.Close() + + go io.Copy(rc.Stdout, cmd.Stdout) + go io.Copy(rc.Stderr, cmd.Stderr) + + cmd.Wait() + rc.SetExited(cmd.ExitCode()) +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + wcp, err := c.newCopyClient() + if err != nil { + return err + } + log.Printf("Uploading file to '%s'", path) + return wcp.Write(path, input) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + return c.Upload(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("Uploading dir '%s' to '%s'", src, dst) + wcp, err := c.newCopyClient() + if err != nil { + return err + } + return wcp.Copy(src, dst) +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + return winrmcp.New(addr, &winrmcp.Config{ + Auth: winrmcp.Auth{ + User: c.config.Username, + Password: c.config.Password, + }, + OperationTimeout: c.config.Timeout, + MaxOperationsPerShell: 15, // lowest common denominator + }) +} diff --git a/communicator/winrm/communicator_test.go b/communicator/winrm/communicator_test.go new file mode 100644 index 000000000..73ac6d7b2 --- /dev/null +++ b/communicator/winrm/communicator_test.go @@ -0,0 +1,94 @@ +package winrm + +import ( + "bytes" + "io" + "testing" + "time" + + "github.com/dylanmei/winrmtest" + "github.com/mitchellh/packer/packer" +) + +func newMockWinRMServer(t *testing.T) *winrmtest.Remote { + wrm := winrmtest.NewRemote() + + wrm.CommandFunc( + winrmtest.MatchText("echo foo"), + func(out, err io.Writer) int { + out.Write([]byte("foo")) + return 0 + }) + + wrm.CommandFunc( + 
winrmtest.MatchPattern(`^echo c29tZXRoaW5n >> ".*"$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchPattern(`^powershell.exe -EncodedCommand .*$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchText("powershell"), + func(out, err io.Writer) int { + return 0 + }) + + return wrm +} + +func TestStart(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + c, err := New(&Config{ + Host: wrm.Host, + Port: wrm.Port, + Username: "user", + Password: "pass", + Timeout: 30 * time.Second, + }) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd packer.RemoteCmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } + cmd.Wait() + + if stdout.String() != "foo" { + t.Fatalf("bad command response: expected %q, got %q", "foo", stdout.String()) + } +} + +func TestUpload(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + c, err := New(&Config{ + Host: wrm.Host, + Port: wrm.Port, + Username: "user", + Password: "pass", + Timeout: 30 * time.Second, + }) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something"))) + if err != nil { + t.Fatalf("error uploading file: %s", err) + } +} diff --git a/communicator/winrm/config.go b/communicator/winrm/config.go new file mode 100644 index 000000000..32c082987 --- /dev/null +++ b/communicator/winrm/config.go @@ -0,0 +1,14 @@ +package winrm + +import ( + "time" +) + +// Config is used to configure the WinRM connection +type Config struct { + Host string + Port int + Username string + Password string + Timeout time.Duration +} diff --git a/communicator/winrm/time.go b/communicator/winrm/time.go new file mode 100644 index 000000000..f8fb6fe8d --- /dev/null +++ b/communicator/winrm/time.go @@ -0,0 +1,32 @@ +package winrm + +import ( + "fmt" + "time" +) + +// formatDuration formats the given time.Duration into an ISO8601 +// duration string. 
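+// For example, 1h1m41s renders as "PT1H1M41S", 20 minutes as "PT20M", and
+// any non-positive duration collapses to "PT0S".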
+func formatDuration(duration time.Duration) string { + // We're not supporting negative durations + if duration.Seconds() <= 0 { + return "PT0S" + } + + h := int(duration.Hours()) + m := int(duration.Minutes()) - (h * 60) + s := int(duration.Seconds()) - (h*3600 + m*60) + + res := "PT" + if h > 0 { + res = fmt.Sprintf("%s%dH", res, h) + } + if m > 0 { + res = fmt.Sprintf("%s%dM", res, m) + } + if s > 0 { + res = fmt.Sprintf("%s%dS", res, s) + } + + return res +} diff --git a/communicator/winrm/time_test.go b/communicator/winrm/time_test.go new file mode 100644 index 000000000..4daf4cedf --- /dev/null +++ b/communicator/winrm/time_test.go @@ -0,0 +1,36 @@ +package winrm + +import ( + "testing" + "time" +) + +func TestFormatDuration(t *testing.T) { + // Test complex duration with hours, minutes, seconds + d := time.Duration(3701) * time.Second + s := formatDuration(d) + if s != "PT1H1M41S" { + t.Fatalf("bad ISO 8601 duration string: %s", s) + } + + // Test only minutes duration + d = time.Duration(20) * time.Minute + s = formatDuration(d) + if s != "PT20M" { + t.Fatalf("bad ISO 8601 duration string for 20M: %s", s) + } + + // Test only seconds + d = time.Duration(1) * time.Second + s = formatDuration(d) + if s != "PT1S" { + t.Fatalf("bad ISO 8601 duration string for 1S: %s", s) + } + + // Test negative duration (unsupported) + d = time.Duration(-1) * time.Second + s = formatDuration(d) + if s != "PT0S" { + t.Fatalf("bad ISO 8601 duration string for negative: %s", s) + } +} From 7a3975805465d1ef31855465e71254dc34be7851 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:05:48 -0700 Subject: [PATCH 374/956] helper/communicator: WinRM stuff --- communicator/winrm/communicator.go | 14 +-- helper/communicator/config.go | 66 ++++++++--- helper/communicator/step_connect_winrm.go | 134 ++++++++++++++++++++++ helper/communicator/winrm.go | 8 ++ 4 files changed, 202 insertions(+), 20 deletions(-) create mode 100644 helper/communicator/step_connect_winrm.go create mode 100644 helper/communicator/winrm.go diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index d0b7eb76c..82686e2a7 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "log" + "os" "github.com/masterzen/winrm/winrm" "github.com/mitchellh/packer/packer" @@ -92,7 +93,7 @@ func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { } // Upload implementation of communicator.Communicator interface -func (c *Communicator) Upload(path string, input io.Reader) error { +func (c *Communicator) Upload(path string, input io.Reader, _ *os.FileInfo) error { wcp, err := c.newCopyClient() if err != nil { return err @@ -101,13 +102,8 @@ func (c *Communicator) Upload(path string, input io.Reader) error { return wcp.Write(path, input) } -// UploadScript implementation of communicator.Communicator interface -func (c *Communicator) UploadScript(path string, input io.Reader) error { - return c.Upload(path, input) -} - // UploadDir implementation of communicator.Communicator interface -func (c *Communicator) UploadDir(dst string, src string) error { +func (c *Communicator) UploadDir(dst string, src string, exclude []string) error { log.Printf("Uploading dir '%s' to '%s'", src, dst) wcp, err := c.newCopyClient() if err != nil { @@ -116,6 +112,10 @@ func (c *Communicator) UploadDir(dst string, src string) error { return wcp.Copy(src, dst) } +func (c *Communicator) Download(src string, dst io.Writer) error { + 
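+	// Download is intentionally left unimplemented for WinRM at this
+	// point; any caller that needs it will hit the panic below.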
panic("download not implemented") +} + func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) return winrmcp.New(addr, &winrmcp.Config{ diff --git a/helper/communicator/config.go b/helper/communicator/config.go index a2d93a480..f0cb78df7 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -12,7 +12,9 @@ import ( // Config is the common configuration that communicators allow within // a builder. type Config struct { - Type string `mapstructure:"communicator"` + Type string `mapstructure:"communicator"` + + // SSH SSHHost string `mapstructure:"ssh_host"` SSHPort int `mapstructure:"ssh_port"` SSHUsername string `mapstructure:"ssh_username"` @@ -20,6 +22,13 @@ type Config struct { SSHPrivateKey string `mapstructure:"ssh_private_key_file"` SSHPty bool `mapstructure:"ssh_pty"` SSHTimeout time.Duration `mapstructure:"ssh_timeout"` + + // WinRM + WinRMUser string `mapstructure:"winrm_username"` + WinRMPassword string `mapstructure:"winrm_password"` + WinRMHost string `mapstructure:"winrm_host"` + WinRMPort int `mapstructure:"winrm_port"` + WinRMTimeout time.Duration `mapstructure:"winrm_timeout"` } func (c *Config) Prepare(ctx *interpolate.Context) []error { @@ -27,6 +36,22 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { c.Type = "ssh" } + var errs []error + switch c.Type { + case "ssh": + if es := c.prepareSSH(ctx); len(es) > 0 { + errs = append(errs, es...) + } + case "winrm": + if es := c.prepareWinRM(ctx); len(es) > 0 { + errs = append(errs, es...) + } + } + + return errs +} + +func (c *Config) prepareSSH(ctx *interpolate.Context) []error { if c.SSHPort == 0 { c.SSHPort = 22 } @@ -37,21 +62,36 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { // Validation var errs []error - if c.Type == "ssh" { - if c.SSHUsername == "" { - errs = append(errs, errors.New("An ssh_username must be specified")) - } + if c.SSHUsername == "" { + errs = append(errs, errors.New("An ssh_username must be specified")) + } - if c.SSHPrivateKey != "" { - if _, err := os.Stat(c.SSHPrivateKey); err != nil { - errs = append(errs, fmt.Errorf( - "ssh_private_key_file is invalid: %s", err)) - } else if _, err := SSHFileSigner(c.SSHPrivateKey); err != nil { - errs = append(errs, fmt.Errorf( - "ssh_private_key_file is invalid: %s", err)) - } + if c.SSHPrivateKey != "" { + if _, err := os.Stat(c.SSHPrivateKey); err != nil { + errs = append(errs, fmt.Errorf( + "ssh_private_key_file is invalid: %s", err)) + } else if _, err := SSHFileSigner(c.SSHPrivateKey); err != nil { + errs = append(errs, fmt.Errorf( + "ssh_private_key_file is invalid: %s", err)) } } return errs } + +func (c *Config) prepareWinRM(ctx *interpolate.Context) []error { + if c.WinRMPort == 0 { + c.WinRMPort = 5985 + } + + if c.WinRMTimeout == 0 { + c.WinRMTimeout = 30 * time.Minute + } + + var errs []error + if c.WinRMUser == "" { + errs = append(errs, errors.New("winrm_username must be specified.")) + } + + return errs +} diff --git a/helper/communicator/step_connect_winrm.go b/helper/communicator/step_connect_winrm.go new file mode 100644 index 000000000..bdd0c1499 --- /dev/null +++ b/helper/communicator/step_connect_winrm.go @@ -0,0 +1,134 @@ +package communicator + +import ( + "errors" + "fmt" + "log" + "time" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/communicator/winrm" + "github.com/mitchellh/packer/packer" +) + +// StepConnectWinRM is a multistep Step implementation that waits for WinRM +// to become 
available. It gets the connection information from a single +// configuration when creating the step. +// +// Uses: +// ui packer.Ui +// +// Produces: +// communicator packer.Communicator +type StepConnectWinRM struct { + // All the fields below are documented on StepConnect + Config *Config + Host func(multistep.StateBag) (string, error) + WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) +} + +func (s *StepConnectWinRM) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + + var comm packer.Communicator + var err error + + cancel := make(chan struct{}) + waitDone := make(chan bool, 1) + go func() { + ui.Say("Waiting for WinRM to become available...") + comm, err = s.waitForWinRM(state, cancel) + waitDone <- true + }() + + log.Printf("Waiting for WinRM, up to timeout: %s", s.Config.WinRMTimeout) + timeout := time.After(s.Config.WinRMTimeout) +WaitLoop: + for { + // Wait for either WinRM to become available, a timeout to occur, + // or an interrupt to come through. + select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for WinRM: %s", err)) + return multistep.ActionHalt + } + + ui.Say("Connected to WinRM!") + state.Put("communicator", comm) + break WaitLoop + case <-timeout: + err := fmt.Errorf("Timeout waiting for WinRM.") + state.Put("error", err) + ui.Error(err.Error()) + close(cancel) + return multistep.ActionHalt + case <-time.After(1 * time.Second): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + // The step sequence was cancelled, so cancel waiting for WinRM + // and just start the halting process. + close(cancel) + log.Println("Interrupt detected, quitting waiting for WinRM.") + return multistep.ActionHalt + } + } + } + + return multistep.ActionContinue +} + +func (s *StepConnectWinRM) Cleanup(multistep.StateBag) { +} + +func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan struct{}) (packer.Communicator, error) { + var comm packer.Communicator + for { + select { + case <-cancel: + log.Println("[INFO] WinRM wait cancelled. Exiting loop.") + return nil, errors.New("WinRM wait cancelled") + case <-time.After(5 * time.Second): + } + + host, err := s.Host(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM host: %s", err) + continue + } + port := s.Config.WinRMPort + + user := s.Config.WinRMUser + password := s.Config.WinRMPassword + if s.WinRMConfig != nil { + config, err := s.WinRMConfig(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM config: %s", err) + continue + } + + if config.Username != "" { + user = config.Username + } + if config.Password != "" { + password = config.Password + } + } + + log.Println("[INFO] Attempting WinRM connection...") + comm, err = winrm.New(&winrm.Config{ + Host: host, + Port: port, + Username: user, + Password: password, + Timeout: s.Config.WinRMTimeout, + }) + if err != nil { + log.Printf("[ERROR] WinRM connection err: %s", err) + continue + } + + break + } + + return comm, nil +} diff --git a/helper/communicator/winrm.go b/helper/communicator/winrm.go new file mode 100644 index 000000000..afdf2569d --- /dev/null +++ b/helper/communicator/winrm.go @@ -0,0 +1,8 @@ +package communicator + +// WinRMConfig is configuration that can be returned at runtime to +// dynamically configure WinRM. 
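+//
+// Non-empty Username/Password values returned by a builder's WinRMConfig
+// callback override the static winrm_username and winrm_password settings
+// (see waitForWinRM in step_connect_winrm.go).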
+type WinRMConfig struct { + Username string + Password string +} From 4be10b428a78126b012fe6d18b78f790d19b4c58 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:07:17 -0700 Subject: [PATCH 375/956] helper/communicator: hook up WinRM --- helper/communicator/step_connect.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go index a31dd4eb8..ce77333e1 100644 --- a/helper/communicator/step_connect.go +++ b/helper/communicator/step_connect.go @@ -26,6 +26,12 @@ type StepConnect struct { SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) SSHPort func(multistep.StateBag) (int, error) + // The fields below are callbacks to assist with connecting to WinRM. + // + // WinRMConfig should return the default configuration for + // connecting via WinRM. + WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + substep multistep.Step } @@ -38,6 +44,11 @@ func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { SSHConfig: s.SSHConfig, SSHPort: s.SSHPort, }, + "winrm": &StepConnectWinRM{ + Config: s.Config, + Host: s.Host, + WinRMConfig: s.WinRMConfig, + }, } step, ok := typeMap[s.Config.Type] From 6077c796f52ee5d427b17fe6ba5a6cfd95a7353f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:10:50 -0700 Subject: [PATCH 376/956] communicator/winrm: fix failing test --- communicator/winrm/communicator_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/communicator/winrm/communicator_test.go b/communicator/winrm/communicator_test.go index 73ac6d7b2..5c29a7403 100644 --- a/communicator/winrm/communicator_test.go +++ b/communicator/winrm/communicator_test.go @@ -87,7 +87,7 @@ func TestUpload(t *testing.T) { t.Fatalf("error creating communicator: %s", err) } - err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something"))) + err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something")), nil) if err != nil { t.Fatalf("error uploading file: %s", err) } From 114027095554d6ed2d43ad9441967a2f37b7e2f3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:13:54 -0700 Subject: [PATCH 377/956] update CHANGELOG --- CHANGELOG.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e26c7818..79e703edd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,12 +13,14 @@ BACKWARDS INCOMPATIBILITIES: FEATURES: - * **New config function: `template_dir`**: The directory to the template - being built. This should be used for template-relative paths. [GH-54] + * **WinRM:** You can now connect via WinRM with almost every builder. + See the docs for more info. [GH-2239] * **Disable SSH:** Set `communicator` to "none" in any builder to disable SSH connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled to allow access to remote servers such as private git repos. [GH-1066] + * **New config function: `template_dir`**: The directory to the template + being built. This should be used for template-relative paths. 
[GH-54] IMPROVEMENTS: From fd4e0e9da47f461e820ba4f87b780a66fbebac5a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:35:45 -0700 Subject: [PATCH 378/956] builder/amazon: StepGetPassword --- builder/amazon/common/run_config.go | 6 + builder/amazon/common/step_get_password.go | 155 +++++++++++++++++++++ builder/amazon/ebs/builder.go | 4 + 3 files changed, 165 insertions(+) create mode 100644 builder/amazon/common/step_get_password.go diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 6dec07b39..5589a5578 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "time" "github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/helper/communicator" @@ -27,6 +28,7 @@ type RunConfig struct { TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"` UserData string `mapstructure:"user_data"` UserDataFile string `mapstructure:"user_data_file"` + WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"` VpcId string `mapstructure:"vpc_id"` // Communicator settings @@ -40,6 +42,10 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { "packer %s", uuid.TimeOrderedUUID()) } + if c.WindowsPasswordTimeout == 0 { + c.WindowsPasswordTimeout = 10 * time.Minute + } + // Validation errs := c.Comm.Prepare(ctx) if c.SourceAmi == "" { diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go new file mode 100644 index 000000000..9d982b10b --- /dev/null +++ b/builder/amazon/common/step_get_password.go @@ -0,0 +1,155 @@ +package common + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/helper/communicator" + "github.com/mitchellh/packer/packer" +) + +// StepGetPassword reads the password from a Windows server and sets it +// on the WinRM config. +type StepGetPassword struct { + Comm *communicator.Config + Timeout time.Duration +} + +func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + image := state.Get("source_image").(*ec2.Image) + + // Skip if we're not Windows... + if *image.Platform != "windows" { + log.Printf("[INFO] Not Windows, skipping get password...") + return multistep.ActionContinue + } + + // If we already have a password, skip it + if s.Comm.WinRMPassword != "" { + ui.Say("Skipping waiting for password since WinRM password set...") + return multistep.ActionContinue + } + + // Get the password + var password string + var err error + cancel := make(chan struct{}) + waitDone := make(chan bool, 1) + go func() { + ui.Say("Waiting for auto-generated password for instance...") + password, err = s.waitForPassword(state, cancel) + waitDone <- true + }() + + timeout := time.After(s.Timeout) +WaitLoop: + for { + // Wait for either SSH to become available, a timeout to occur, + // or an interrupt to come through. 
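+	// (Despite the mention of SSH above, what this loop actually waits
+	// on is the auto-generated Windows password.)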
+ select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for password: %s", err)) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Message("Password retrieved!") + s.Comm.WinRMPassword = password + break WaitLoop + case <-timeout: + err := fmt.Errorf("Timeout waiting for password.") + state.Put("error", err) + ui.Error(err.Error()) + close(cancel) + return multistep.ActionHalt + case <-time.After(1 * time.Second): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + // The step sequence was cancelled, so cancel waiting for password + // and just start the halting process. + close(cancel) + log.Println("[WARN] Interrupt detected, quitting waiting for password.") + return multistep.ActionHalt + } + } + } + return multistep.ActionContinue +} + +func (s *StepGetPassword) Cleanup(multistep.StateBag) {} + +func (s *StepGetPassword) waitForPassword(state multistep.StateBag, cancel <-chan struct{}) (string, error) { + ec2conn := state.Get("ec2").(*ec2.EC2) + instance := state.Get("instance").(*ec2.Instance) + privateKey := state.Get("privateKey").(string) + + for { + select { + case <-cancel: + log.Println("[INFO] Retrieve password wait cancelled. Exiting loop.") + return "", errors.New("Retrieve password wait cancelled") + case <-time.After(5 * time.Second): + } + + resp, err := ec2conn.GetPasswordData(&ec2.GetPasswordDataInput{ + InstanceID: instance.InstanceID, + }) + if err != nil { + err := fmt.Errorf("Error retrieving auto-generated instance password: %s", err) + return "", err + } + + if resp.PasswordData != nil && *resp.PasswordData != "" { + decryptedPassword, err := decryptPasswordDataWithPrivateKey( + *resp.PasswordData, []byte(privateKey)) + if err != nil { + err := fmt.Errorf("Error decrypting auto-generated instance password: %s", err) + return "", err + } + + return decryptedPassword, nil + } + } +} + +func decryptPasswordDataWithPrivateKey(passwordData string, pemBytes []byte) (string, error) { + encryptedPasswd, err := base64.StdEncoding.DecodeString(passwordData) + if err != nil { + return "", err + } + + block, _ := pem.Decode(pemBytes) + var asn1Bytes []byte + if _, ok := block.Headers["DEK-Info"]; ok { + return "", errors.New("encrypted private key isn't yet supported") + /* + asn1Bytes, err = x509.DecryptPEMBlock(block, password) + if err != nil { + return "", err + } + */ + } else { + asn1Bytes = block.Bytes + } + + key, err := x509.ParsePKCS1PrivateKey(asn1Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, key, encryptedPasswd) + if err != nil { + return "", err + } + + return string(out), nil +} diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index cd3cd8f05..f61b258f4 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -113,6 +113,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BlockDevices: b.config.BlockDevices, Tags: b.config.RunTags, }, + &awscommon.StepGetPassword{ + Comm: &b.config.RunConfig.Comm, + Timeout: b.config.WindowsPasswordTimeout, + }, &communicator.StepConnect{ Config: &b.config.RunConfig.Comm, Host: awscommon.SSHHost( From d23f254b7675dd1e49b5a393b27e38e0a6214c32 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:39:19 -0700 Subject: [PATCH 379/956] builder/amazon: don't get password if platform not set on image --- builder/amazon/common/step_get_password.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
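The decryption helper above is easy to validate locally: EC2 encrypts the auto-generated Administrator password with the key pair's RSA public key, so a throwaway key can round-trip the whole path — base64 decode, PEM decode, PKCS#1 parse, PKCS#1 v1.5 decrypt. A self-contained sketch, with the key and password as stand-ins for the real EC2 key pair and GetPasswordData response:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"log"
)

func main() {
	// Stand-in for the EC2 key pair: a throwaway RSA key, PEM-encoded the
	// way a downloaded .pem private key looks.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// Stand-in for GetPasswordData: EC2 returns the password encrypted
	// with the key pair's public key, then base64-encoded.
	ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, &key.PublicKey, []byte("s3cret"))
	if err != nil {
		log.Fatal(err)
	}
	passwordData := base64.StdEncoding.EncodeToString(ciphertext)

	// The decrypt path mirrors decryptPasswordDataWithPrivateKey.
	raw, _ := base64.StdEncoding.DecodeString(passwordData)
	block, _ := pem.Decode(pemBytes)
	parsed, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	plain, err := rsa.DecryptPKCS1v15(nil, parsed, raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain)) // s3cret
}
```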
a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index 9d982b10b..a2ef04952 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -28,7 +28,7 @@ func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { image := state.Get("source_image").(*ec2.Image) // Skip if we're not Windows... - if *image.Platform != "windows" { + if image.Platform == nil || *image.Platform != "windows" { log.Printf("[INFO] Not Windows, skipping get password...") return multistep.ActionContinue } From 022a115d190a4c6eef332bf3358af75991d26c3e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:42:10 -0700 Subject: [PATCH 380/956] builder/amazon: improve messaging --- builder/amazon/common/step_get_password.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index a2ef04952..0fdd467eb 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -46,6 +46,9 @@ func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { waitDone := make(chan bool, 1) go func() { ui.Say("Waiting for auto-generated password for instance...") + ui.Message( + "It is normal for this process to take up to 15 minutes,\n" + + "but it usually takes around 5. Please wait.") password, err = s.waitForPassword(state, cancel) waitDone <- true }() From 1d94e0f8e38ec69c31b7a28549f06c26687e7f23 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:50:02 -0700 Subject: [PATCH 381/956] template: abslute path for template path --- template/parse.go | 8 ++++++++ template/parse_test.go | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/template/parse.go b/template/parse.go index dbb29569d..4a7069dea 100644 --- a/template/parse.go +++ b/template/parse.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "path/filepath" "sort" "github.com/hashicorp/go-multierror" @@ -317,6 +318,13 @@ func ParseFile(path string) (*Template, error) { return nil, err } + if !filepath.IsAbs(path) { + path, err = filepath.Abs(path) + if err != nil { + return nil, err + } + } + tpl.Path = path return tpl, nil } diff --git a/template/parse_test.go b/template/parse_test.go index 9abca2f77..fa5477a4f 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -1,6 +1,7 @@ package template import ( + "path/filepath" "reflect" "strings" "testing" @@ -306,7 +307,7 @@ func TestParse(t *testing.T) { } for _, tc := range cases { - path := fixtureDir(tc.File) + path, _ := filepath.Abs(fixtureDir(tc.File)) tpl, err := ParseFile(fixtureDir(tc.File)) if (err != nil) != tc.Err { t.Fatalf("err: %s", err) From dc8c94890a8f926c621f062fc693c20ee93a21be Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 22:56:36 -0700 Subject: [PATCH 382/956] helper/config: copy template path properly --- helper/config/decode.go | 1 + 1 file changed, 1 insertion(+) diff --git a/helper/config/decode.go b/helper/config/decode.go index 20554da61..1088fd19b 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -42,6 +42,7 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { if config.InterpolateContext == nil { config.InterpolateContext = ctx } else { + config.InterpolateContext.TemplatePath = ctx.TemplatePath config.InterpolateContext.UserVariables = ctx.UserVariables } ctx = config.InterpolateContext From 
8f6ecfd9e31c8d57b228ff656da7144ce304dab3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 23:12:59 -0700 Subject: [PATCH 383/956] builder/amazon: various fixes (minor) to get things going --- builder/amazon/common/step_get_password.go | 8 ++++++-- .../amazon/common/step_run_source_instance.go | 8 ++++++++ builder/amazon/common/step_security_group.go | 16 ++++++++++------ builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- helper/communicator/config.go | 12 ++++++++++++ 6 files changed, 38 insertions(+), 10 deletions(-) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index 0fdd467eb..37cfe3af6 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -48,7 +48,9 @@ func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Waiting for auto-generated password for instance...") ui.Message( "It is normal for this process to take up to 15 minutes,\n" + - "but it usually takes around 5. Please wait.") + "but it usually takes around 5. Please wait. After the\n" + + "password is read, it will printed out below. Since it should\n" + + "be a temporary password, this should be a minimal security risk.") password, err = s.waitForPassword(state, cancel) waitDone <- true }() @@ -66,7 +68,7 @@ WaitLoop: return multistep.ActionHalt } - ui.Message("Password retrieved!") + ui.Message(fmt.Sprintf(" \nPassword retrieved: %s", password)) s.Comm.WinRMPassword = password break WaitLoop case <-timeout: @@ -121,6 +123,8 @@ func (s *StepGetPassword) waitForPassword(state multistep.StateBag, cancel <-cha return decryptedPassword, nil } + + log.Printf("[DEBUG] Password is blank, will retry...") } } diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 92dafa564..021432e77 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -1,6 +1,7 @@ package common import ( + "encoding/base64" "fmt" "io/ioutil" "log" @@ -53,7 +54,14 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi return multistep.ActionHalt } + // Test if it is encoded already, and if not, encode it + if _, err := base64.StdEncoding.DecodeString(string(contents)); err != nil { + log.Printf("[DEBUG] base64 encoding user data...") + contents = []byte(base64.StdEncoding.EncodeToString(contents)) + } + userData = string(contents) + } ui.Say("Launching a source AWS instance...") diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go index d870fd1c3..b65ebb408 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -9,12 +9,13 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" ) type StepSecurityGroup struct { + CommConfig *communicator.Config SecurityGroupIds []string - SSHPort int VpcId string createdGroupId string @@ -30,8 +31,9 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionContinue } - if s.SSHPort == 0 { - panic("SSHPort must be set to a non-zero value.") + port := s.CommConfig.Port() + if port == 0 { + panic("port must be set to a non-zero value.") } // Create the group @@ -57,15 +59,17 @@ 
func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { req := &ec2.AuthorizeSecurityGroupIngressInput{ GroupID: groupResp.GroupID, IPProtocol: aws.String("tcp"), - FromPort: aws.Long(int64(s.SSHPort)), - ToPort: aws.Long(int64(s.SSHPort)), + FromPort: aws.Long(int64(port)), + ToPort: aws.Long(int64(port)), CIDRIP: aws.String("0.0.0.0/0"), } // We loop and retry this a few times because sometimes the security // group isn't available immediately because AWS resources are eventaully // consistent. - ui.Say("Authorizing SSH access on the temporary security group...") + ui.Say(fmt.Sprintf( + "Authorizing access to port %d the temporary security group...", + port)) for i := 0; i < 5; i++ { _, err = ec2conn.AuthorizeSecurityGroupIngress(req) if err == nil { diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index f61b258f4..162c06e28 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -94,7 +94,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, - SSHPort: b.config.RunConfig.Comm.SSHPort, + CommConfig: &b.config.RunConfig.Comm, VpcId: b.config.VpcId, }, &awscommon.StepRunSourceInstance{ diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index d26cc63e3..ffe1c2da6 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -179,8 +179,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &awscommon.StepSecurityGroup{ + CommConfig: &b.config.RunConfig.Comm, SecurityGroupIds: b.config.SecurityGroupIds, - SSHPort: b.config.RunConfig.Comm.SSHPort, VpcId: b.config.VpcId, }, &awscommon.StepRunSourceInstance{ diff --git a/helper/communicator/config.go b/helper/communicator/config.go index f0cb78df7..72dc69b7e 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -31,6 +31,18 @@ type Config struct { WinRMTimeout time.Duration `mapstructure:"winrm_timeout"` } +// Port returns the port that will be used for access based on config. +func (c *Config) Port() int { + switch c.Type { + case "ssh": + return c.SSHPort + case "winrm": + return c.WinRMPort + default: + return 0 + } +} + func (c *Config) Prepare(ctx *interpolate.Context) []error { if c.Type == "" { c.Type = "ssh" From e9d916a7bcd3a99ee3161efc1ac69f1c95255634 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 13 Jun 2015 23:14:48 -0700 Subject: [PATCH 384/956] builder/amazon: don't print windows password --- builder/amazon/common/step_get_password.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index 37cfe3af6..ab51f4394 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -48,9 +48,7 @@ func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Waiting for auto-generated password for instance...") ui.Message( "It is normal for this process to take up to 15 minutes,\n" + - "but it usually takes around 5. Please wait. After the\n" + - "password is read, it will printed out below. Since it should\n" + - "be a temporary password, this should be a minimal security risk.") + "but it usually takes around 5. 
Please wait.") password, err = s.waitForPassword(state, cancel) waitDone <- true }() @@ -68,7 +66,7 @@ WaitLoop: return multistep.ActionHalt } - ui.Message(fmt.Sprintf(" \nPassword retrieved: %s", password)) + ui.Message(fmt.Sprintf(" \nPassword retrieved!")) s.Comm.WinRMPassword = password break WaitLoop case <-timeout: From b51761ca36e72e8f772e2e179d50b42a8c362a9f Mon Sep 17 00:00:00 2001 From: Marcin Matlaszek Date: Sun, 14 Jun 2015 17:35:50 +0200 Subject: [PATCH 385/956] Fix network interface spec when requesting spot. --- builder/amazon/common/step_run_source_instance.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 92dafa564..71345ade7 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -174,11 +174,15 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ImageID: &s.SourceAMI, InstanceType: &s.InstanceType, UserData: &userData, - SecurityGroupIDs: securityGroupIds, IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, - SubnetID: &s.SubnetId, NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ - &ec2.InstanceNetworkInterfaceSpecification{AssociatePublicIPAddress: &s.AssociatePublicIpAddress}, + &ec2.InstanceNetworkInterfaceSpecification{ + DeviceIndex: aws.Long(0), + AssociatePublicIPAddress: &s.AssociatePublicIpAddress, + SubnetID: &s.SubnetId, + Groups: securityGroupIds, + DeleteOnTermination: aws.Boolean(true), + }, }, Placement: &ec2.SpotPlacement{ AvailabilityZone: &availabilityZone, From 101e5986dcef11c688b5ecfb23511abc90a5d5bd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 10:50:18 -0700 Subject: [PATCH 386/956] builder/amazon: enable windows for instance type too --- builder/amazon/instance/builder.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index ffe1c2da6..ec62394ee 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -198,6 +198,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe BlockDevices: b.config.BlockDevices, Tags: b.config.RunTags, }, + &awscommon.StepGetPassword{ + Comm: &b.config.RunConfig.Comm, + Timeout: b.config.WindowsPasswordTimeout, + }, &communicator.StepConnect{ Config: &b.config.RunConfig.Comm, Host: awscommon.SSHHost( From b2e9277d3b8cec19e6e935629d8888927bd4c95c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 10:51:34 -0700 Subject: [PATCH 387/956] website: update for Windows AWS instances --- website/source/docs/builders/amazon-ebs.html.markdown | 4 ++++ website/source/docs/builders/amazon-instance.html.markdown | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 0ff9522df..088e6e974 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -168,6 +168,10 @@ each category, the available configuration keys are alphabetized. * `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in order to create a temporary security group within the VPC. +* `windows_password_timeout` (string) - The timeout for waiting for + a Windows password for Windows instances. 
Defaults to 20 minutes. + Example value: "10m" + ## Basic Example Here is a basic example. It is completely valid except for the access keys: diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index ae5fbff27..249313160 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -209,6 +209,10 @@ each category, the available configuration keys are alphabetized. it is perfectly okay to create this directory as part of the provisioning process. +* `windows_password_timeout` (string) - The timeout for waiting for + a Windows password for Windows instances. Defaults to 20 minutes. + Example value: "10m" + ## Basic Example Here is a basic example. It is completely valid except for the access keys: From cf570a71dc9e279c4791521ca571336a147b8885 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 10:53:03 -0700 Subject: [PATCH 388/956] update CHANGELOG --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79e703edd..4a11f23d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ FEATURES: * **WinRM:** You can now connect via WinRM with almost every builder. See the docs for more info. [GH-2239] + * **Windows AWS Support:** Windows AMIs can now be built without any + external plugins: Packer will start a Windows instance, get the + admin password, and can use WinRM (above) to connect through. [GH-2240] * **Disable SSH:** Set `communicator` to "none" in any builder to disable SSH connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled From 040ff0706d6b514d5a5b927982efe857bfc528c1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:01:28 -0700 Subject: [PATCH 389/956] provisioner/powershell --- plugin/provisioner-powershell/main.go | 15 + provisioner/powershell/elevated.go | 87 +++ provisioner/powershell/powershell.go | 17 + provisioner/powershell/provisioner.go | 459 ++++++++++++++ provisioner/powershell/provisioner_test.go | 656 +++++++++++++++++++++ 5 files changed, 1234 insertions(+) create mode 100644 plugin/provisioner-powershell/main.go create mode 100644 provisioner/powershell/elevated.go create mode 100644 provisioner/powershell/powershell.go create mode 100644 provisioner/powershell/provisioner.go create mode 100644 provisioner/powershell/provisioner_test.go diff --git a/plugin/provisioner-powershell/main.go b/plugin/provisioner-powershell/main.go new file mode 100644 index 000000000..672bdb43f --- /dev/null +++ b/plugin/provisioner-powershell/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/provisioner/powershell" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterProvisioner(new(powershell.Provisioner)) + server.Serve() +} diff --git a/provisioner/powershell/elevated.go b/provisioner/powershell/elevated.go new file mode 100644 index 000000000..00bc72e4a --- /dev/null +++ b/provisioner/powershell/elevated.go @@ -0,0 +1,87 @@ +package powershell + +import ( + "text/template" +) + +type elevatedOptions struct { + User string + Password string + TaskName string + TaskDescription string + EncodedCommand string +} + +var elevatedTemplate = template.Must(template.New("ElevatedCommand").Parse(` +$name = "{{.TaskName}}" 
+$log = "$env:TEMP\$name.out" +$s = New-Object -ComObject "Schedule.Service" +$s.Connect() +$t = $s.NewTask($null) +$t.XmlText = @' + + + + {{.TaskDescription}} + + + + {{.User}} + Password + HighestAvailable + + + + IgnoreNew + false + false + true + false + false + + false + false + + true + true + false + false + false + PT24H + 4 + + + + cmd + /c powershell.exe -EncodedCommand {{.EncodedCommand}} > %TEMP%\{{.TaskName}}.out 2>&1 + + + +'@ +$f = $s.GetFolder("\") +$f.RegisterTaskDefinition($name, $t, 6, "{{.User}}", "{{.Password}}", 1, $null) | Out-Null +$t = $f.GetTask("\$name") +$t.Run($null) | Out-Null +$timeout = 10 +$sec = 0 +while ((!($t.state -eq 4)) -and ($sec -lt $timeout)) { + Start-Sleep -s 1 + $sec++ +} +function SlurpOutput($l) { + if (Test-Path $log) { + Get-Content $log | select -skip $l | ForEach { + $l += 1 + Write-Host "$_" + } + } + return $l +} +$line = 0 +do { + Start-Sleep -m 100 + $line = SlurpOutput $line +} while (!($t.state -eq 3)) +$result = $t.LastTaskResult +[System.Runtime.Interopservices.Marshal]::ReleaseComObject($s) | Out-Null +exit $result`)) diff --git a/provisioner/powershell/powershell.go b/provisioner/powershell/powershell.go new file mode 100644 index 000000000..1f5a7ffad --- /dev/null +++ b/provisioner/powershell/powershell.go @@ -0,0 +1,17 @@ +package powershell + +import ( + "encoding/base64" +) + +func powershellEncode(buffer []byte) string { + // 2 byte chars to make PowerShell happy + wideCmd := "" + for _, b := range buffer { + wideCmd += string(b) + "\x00" + } + + // Base64 encode the command + input := []uint8(wideCmd) + return base64.StdEncoding.EncodeToString(input) +} diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go new file mode 100644 index 000000000..0c2454d0d --- /dev/null +++ b/provisioner/powershell/provisioner.go @@ -0,0 +1,459 @@ +// This package implements a provisioner for Packer that executes +// shell scripts within the remote machine. +package powershell + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "sort" + "strings" + "time" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/common/uuid" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +const DefaultRemotePath = "c:/Windows/Temp/script.ps1" + +var retryableSleep = 2 * time.Second + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + // If true, the script contains binary and line endings will not be + // converted from Windows to Unix-style. + Binary bool + + // An inline script to execute. Multiple strings are all executed + // in the context of a single shell. + Inline []string + + // The local path of the shell script to upload and execute. + Script string + + // An array of multiple scripts to run. + Scripts []string + + // An array of environment variables that will be injected before + // your command(s) are executed. + Vars []string `mapstructure:"environment_vars"` + + // The remote path where the local shell script will be uploaded to. + // This should be set to a writable file that is in a pre-existing directory. + RemotePath string `mapstructure:"remote_path"` + + // The command used to execute the script. The '{{ .Path }}' variable + // should be used to specify where the script goes, {{ .Vars }} + // can be used to inject the environment_vars into the environment. 
+ ExecuteCommand string `mapstructure:"execute_command"` + + // The command used to execute the elevated script. The '{{ .Path }}' variable + // should be used to specify where the script goes, {{ .Vars }} + // can be used to inject the environment_vars into the environment. + ElevatedExecuteCommand string `mapstructure:"elevated_execute_command"` + + // The timeout for retrying to start the process. Until this timeout + // is reached, if the provisioner can't start a process, it retries. + // This can be set high to allow for reboots. + StartRetryTimeout time.Duration `mapstructure:"start_retry_timeout"` + + // This is used in the template generation to format environment variables + // inside the `ExecuteCommand` template. + EnvVarFormat string + + // This is used in the template generation to format environment variables + // inside the `ElevatedExecuteCommand` template. + ElevatedEnvVarFormat string `mapstructure:"elevated_env_var_format"` + + // Instructs the communicator to run the remote script as a + // Windows scheduled task, effectively elevating the remote + // user by impersonating a logged-in user + ElevatedUser string `mapstructure:"elevated_user"` + ElevatedPassword string `mapstructure:"elevated_password"` + + // Valid Exit Codes - 0 is not always the only valid error code! + // See http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description for examples + // such as 3010 - "The requested operation is successful. Changes will not be effective until the system is rebooted." + ValidExitCodes []int `mapstructure:"valid_exit_codes"` + + ctx interpolate.Context +} + +type Provisioner struct { + config Config + communicator packer.Communicator +} + +type ExecuteCommandTemplate struct { + Vars string + Path string +} + +func (p *Provisioner) Prepare(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) 
+ if err != nil { + return err + } + + if p.config.EnvVarFormat == "" { + p.config.EnvVarFormat = `$env:%s=\"%s\"; ` + } + + if p.config.ElevatedEnvVarFormat == "" { + p.config.ElevatedEnvVarFormat = `$env:%s="%s"; ` + } + + if p.config.ExecuteCommand == "" { + p.config.ExecuteCommand = `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"` + } + + if p.config.ElevatedExecuteCommand == "" { + p.config.ElevatedExecuteCommand = `{{.Vars}}{{.Path}}` + } + + if p.config.Inline != nil && len(p.config.Inline) == 0 { + p.config.Inline = nil + } + + if p.config.StartRetryTimeout == 0 { + p.config.StartRetryTimeout = 5 * time.Minute + } + + if p.config.RemotePath == "" { + p.config.RemotePath = DefaultRemotePath + } + + if p.config.Scripts == nil { + p.config.Scripts = make([]string, 0) + } + + if p.config.Vars == nil { + p.config.Vars = make([]string, 0) + } + + if p.config.ValidExitCodes == nil { + p.config.ValidExitCodes = []int{0} + } + + var errs error + if p.config.Script != "" && len(p.config.Scripts) > 0 { + errs = packer.MultiErrorAppend(errs, + errors.New("Only one of script or scripts can be specified.")) + } + + if p.config.ElevatedUser != "" && p.config.ElevatedPassword == "" { + errs = packer.MultiErrorAppend(errs, + errors.New("Must supply an 'elevated_password' if 'elevated_user' provided")) + } + + if p.config.ElevatedUser == "" && p.config.ElevatedPassword != "" { + errs = packer.MultiErrorAppend(errs, + errors.New("Must supply an 'elevated_user' if 'elevated_password' provided")) + } + + if p.config.Script != "" { + p.config.Scripts = []string{p.config.Script} + } + + if len(p.config.Scripts) == 0 && p.config.Inline == nil { + errs = packer.MultiErrorAppend(errs, + errors.New("Either a script file or inline script must be specified.")) + } else if len(p.config.Scripts) > 0 && p.config.Inline != nil { + errs = packer.MultiErrorAppend(errs, + errors.New("Only a script file or an inline script can be specified, not both.")) + } + + for _, path := range p.config.Scripts { + if _, err := os.Stat(path); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad script '%s': %s", path, err)) + } + } + + // Do a check for bad environment variables, such as '=foo', 'foobar' + for _, kv := range p.config.Vars { + vs := strings.SplitN(kv, "=", 2) + if len(vs) != 2 || vs[0] == "" { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) + } + } + + if errs != nil { + return errs + } + + return nil +} + +// Takes the inline scripts, concatenates them +// into a temporary file and returns a string containing the location +// of said file. 
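With the defaults above in place, the execute_command template is rendered against a Path and a flattened Vars string. A sketch of that expansion — Packer uses its interpolate package, but text/template behaves identically for this simple case, and the variable values below are illustrative:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// The default execute_command from Prepare above.
	const executeCommand = `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`

	tpl := template.Must(template.New("cmd").Parse(executeCommand))
	_ = tpl.Execute(os.Stdout, struct{ Vars, Path string }{
		// EnvVarFormat is `$env:%s=\"%s\"; `, so one user variable
		// flattens to the string below.
		Vars: `$env:FOO=\"bar\"; `,
		Path: "c:/Windows/Temp/script.ps1",
	})
	// Output:
	// powershell "& { $env:FOO=\"bar\"; c:/Windows/Temp/script.ps1; exit $LastExitCode}"
}
```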
+func extractScript(p *Provisioner) (string, error) { + temp, err := ioutil.TempFile(os.TempDir(), "packer-powershell-provisioner") + if err != nil { + return "", err + } + defer temp.Close() + writer := bufio.NewWriter(temp) + for _, command := range p.config.Inline { + log.Printf("Found command: %s", command) + if _, err := writer.WriteString(command + "\n"); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + } + + if err := writer.Flush(); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + + return temp.Name(), nil +} + +func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + ui.Say(fmt.Sprintf("Provisioning with Powershell...")) + p.communicator = comm + + scripts := make([]string, len(p.config.Scripts)) + copy(scripts, p.config.Scripts) + + // Build our variables up by adding in the build name and builder type + envVars := make([]string, len(p.config.Vars)+2) + envVars[0] = "PACKER_BUILD_NAME=" + p.config.PackerBuildName + envVars[1] = "PACKER_BUILDER_TYPE=" + p.config.PackerBuilderType + copy(envVars, p.config.Vars) + + if p.config.Inline != nil { + temp, err := extractScript(p) + if err != nil { + ui.Error(fmt.Sprintf("Unable to extract inline scripts into a file: %s", err)) + } + scripts = append(scripts, temp) + } + + for _, path := range scripts { + ui.Say(fmt.Sprintf("Provisioning with shell script: %s", path)) + + log.Printf("Opening %s for reading", path) + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("Error opening shell script: %s", err) + } + defer f.Close() + + command, err := p.createCommandText() + if err != nil { + return fmt.Errorf("Error processing command: %s", err) + } + + // Upload the file and run the command. Do this in the context of + // a single retryable function so that we don't end up with + // the case that the upload succeeded, a restart is initiated, + // and then the command is executed but the file doesn't exist + // any longer. + var cmd *packer.RemoteCmd + err = p.retryable(func() error { + if _, err := f.Seek(0, 0); err != nil { + return err + } + + if err := comm.Upload(p.config.RemotePath, f, nil); err != nil { + return fmt.Errorf("Error uploading script: %s", err) + } + + cmd = &packer.RemoteCmd{Command: command} + return cmd.StartWithUi(comm, ui) + }) + if err != nil { + return err + } + + // Close the original file since we copied it + f.Close() + + // Check exit code against allowed codes (likely just 0) + validExitCode := false + for _, v := range p.config.ValidExitCodes { + if cmd.ExitStatus == v { + validExitCode = true + } + } + if !validExitCode { + return fmt.Errorf("Script exited with non-zero exit status: %d. Allowed exit codes are: %s", cmd.ExitStatus, p.config.ValidExitCodes) + } + } + + return nil +} + +func (p *Provisioner) Cancel() { + // Just hard quit. It isn't a big deal if what we're doing keeps + // running on the other side. + os.Exit(0) +} + +// retryable will retry the given function over and over until a +// non-error is returned. +func (p *Provisioner) retryable(f func() error) error { + startTimeout := time.After(p.config.StartRetryTimeout) + for { + var err error + if err = f(); err == nil { + return nil + } + + // Create an error and log it + err = fmt.Errorf("Retryable error: %s", err) + log.Printf(err.Error()) + + // Check if we timed out, otherwise we retry. It is safe to + // retry since the only error case above is if the command + // failed to START. 
+ select { + case <-startTimeout: + return err + default: + time.Sleep(retryableSleep) + } + } +} + +func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string, err error) { + flattened = "" + envVars := make(map[string]string) + + // Always available Packer provided env vars + envVars["PACKER_BUILD_NAME"] = p.config.PackerBuildName + envVars["PACKER_BUILDER_TYPE"] = p.config.PackerBuilderType + + // Split vars into key/value components + for _, envVar := range p.config.Vars { + keyValue := strings.Split(envVar, "=") + if len(keyValue) != 2 { + err = errors.New("Shell provisioner environment variables must be in key=value format") + return + } + envVars[keyValue[0]] = keyValue[1] + } + + // Create a list of env var keys in sorted order + var keys []string + for k := range envVars { + keys = append(keys, k) + } + sort.Strings(keys) + format := p.config.EnvVarFormat + if elevated { + format = p.config.ElevatedEnvVarFormat + } + + // Re-assemble vars using OS specific format pattern and flatten + for _, key := range keys { + flattened += fmt.Sprintf(format, key, envVars[key]) + } + return +} + +func (p *Provisioner) createCommandText() (command string, err error) { + // Create environment variables to set before executing the command + flattenedEnvVars, err := p.createFlattenedEnvVars(false) + if err != nil { + return "", err + } + + p.config.ctx.Data = &ExecuteCommandTemplate{ + Vars: flattenedEnvVars, + Path: p.config.RemotePath, + } + command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + if err != nil { + return "", fmt.Errorf("Error processing command: %s", err) + } + + // Return the interpolated command + if p.config.ElevatedUser == "" { + return command, nil + } + + // Can't double escape the env vars, lets create shiny new ones + flattenedEnvVars, err = p.createFlattenedEnvVars(true) + p.config.ctx.Data = &ExecuteCommandTemplate{ + Vars: flattenedEnvVars, + Path: p.config.RemotePath, + } + command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + if err != nil { + return "", fmt.Errorf("Error processing command: %s", err) + } + + // OK so we need an elevated shell runner to wrap our command, this is going to have its own path + // generate the script and update the command runner in the process + path, err := p.generateElevatedRunner(command) + + // Return the path to the elevated shell wrapper + command = fmt.Sprintf("powershell -executionpolicy bypass -file \"%s\"", path) + + return +} + +func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath string, err error) { + log.Printf("Building elevated command wrapper for: %s", command) + + // generate command + var buffer bytes.Buffer + err = elevatedTemplate.Execute(&buffer, elevatedOptions{ + User: p.config.ElevatedUser, + Password: p.config.ElevatedPassword, + TaskDescription: "Packer elevated task", + TaskName: fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()), + EncodedCommand: powershellEncode([]byte(command + "; exit $LASTEXITCODE")), + }) + + if err != nil { + fmt.Printf("Error creating elevated template: %s", err) + return "", err + } + + tmpFile, err := ioutil.TempFile(os.TempDir(), "packer-elevated-shell.ps1") + writer := bufio.NewWriter(tmpFile) + if _, err := writer.WriteString(string(buffer.Bytes())); err != nil { + return "", fmt.Errorf("Error preparing elevated shell script: %s", err) + } + + if err := writer.Flush(); err != nil { + return "", fmt.Errorf("Error preparing elevated shell script: %s", err) + } + tmpFile.Close() + f, err := 
os.Open(tmpFile.Name()) + if err != nil { + return "", fmt.Errorf("Error opening temporary elevated shell script: %s", err) + } + defer f.Close() + + uuid := uuid.TimeOrderedUUID() + path := fmt.Sprintf(`${env:TEMP}\packer-elevated-shell-%s.ps1`, uuid) + log.Printf("Uploading elevated shell wrapper for command [%s] to [%s] from [%s]", command, path, tmpFile.Name()) + err = p.communicator.Upload(path, f, nil) + if err != nil { + return "", fmt.Errorf("Error preparing elevated shell script: %s", err) + } + + // CMD formatted Path required for this op + path = fmt.Sprintf("%s-%s.ps1", "%TEMP%\\packer-elevated-shell", uuid) + return path, err +} diff --git a/provisioner/powershell/provisioner_test.go b/provisioner/powershell/provisioner_test.go new file mode 100644 index 000000000..78484c184 --- /dev/null +++ b/provisioner/powershell/provisioner_test.go @@ -0,0 +1,656 @@ +package powershell + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + //"log" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/mitchellh/packer/packer" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "inline": []interface{}{"foo", "bar"}, + } +} + +func init() { + //log.SetOutput(ioutil.Discard) +} + +func TestProvisionerPrepare_extractScript(t *testing.T) { + config := testConfig() + p := new(Provisioner) + _ = p.Prepare(config) + file, err := extractScript(p) + if err != nil { + t.Fatalf("Should not be error: %s", err) + } + t.Logf("File: %s", file) + if strings.Index(file, os.TempDir()) != 0 { + t.Fatalf("Temp file should reside in %s. File location: %s", os.TempDir(), file) + } + + // File contents should contain 2 lines concatenated by newlines: foo\nbar + readFile, err := ioutil.ReadFile(file) + expectedContents := "foo\nbar\n" + s := string(readFile[:]) + if s != expectedContents { + t.Fatalf("Expected generated inlineScript to equal '%s', got '%s'", expectedContents, s) + } +} + +func TestProvisioner_Impl(t *testing.T) { + var raw interface{} + raw = &Provisioner{} + if _, ok := raw.(packer.Provisioner); !ok { + t.Fatalf("must be a Provisioner") + } +} + +func TestProvisionerPrepare_Defaults(t *testing.T) { + var p Provisioner + config := testConfig() + + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.RemotePath != DefaultRemotePath { + t.Errorf("unexpected remote path: %s", p.config.RemotePath) + } + + if p.config.ElevatedUser != "" { + t.Error("expected elevated_user to be empty") + } + if p.config.ElevatedPassword != "" { + t.Error("expected elevated_password to be empty") + } + + if p.config.ExecuteCommand != "powershell \"& { {{.Vars}}{{.Path}}; exit $LastExitCode}\"" { + t.Fatalf("Default command should be powershell \"& { {{.Vars}}{{.Path}}; exit $LastExitCode}\", but got %s", p.config.ExecuteCommand) + } + + if p.config.ElevatedExecuteCommand != "{{.Vars}}{{.Path}}" { + t.Fatalf("Default command should be powershell {{.Vars}}{{.Path}}, but got %s", p.config.ElevatedExecuteCommand) + } + + if p.config.ValidExitCodes == nil { + t.Fatalf("ValidExitCodes should not be nil") + } + if p.config.ValidExitCodes != nil { + expCodes := []int{0} + for i, v := range p.config.ValidExitCodes { + if v != expCodes[i] { + t.Fatalf("Expected ValidExitCodes don't match actual") + } + } + } + + if p.config.ElevatedEnvVarFormat != `$env:%s="%s"; ` { + t.Fatalf("Default command should be powershell \"{{.Vars}}{{.Path}}\", but got %s", p.config.ElevatedEnvVarFormat) + } +} + +func TestProvisionerPrepare_Config(t *testing.T) { + 
config := testConfig() + config["elevated_user"] = "{{user `user`}}" + config["elevated_password"] = "{{user `password`}}" + config[packer.UserVariablesConfigKey] = map[string]string{ + "user": "myusername", + "password": "mypassword", + } + + var p Provisioner + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.ElevatedUser != "myusername" { + t.Fatalf("Expected 'myusername' for key `elevated_user`: %s", p.config.ElevatedUser) + } + if p.config.ElevatedPassword != "mypassword" { + t.Fatalf("Expected 'mypassword' for key `elevated_password`: %s", p.config.ElevatedPassword) + } + +} + +func TestProvisionerPrepare_InvalidKey(t *testing.T) { + var p Provisioner + config := testConfig() + + // Add a random key + config["i_should_not_be_valid"] = true + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_Elevated(t *testing.T) { + var p Provisioner + config := testConfig() + + // Add a random key + config["elevated_user"] = "vagrant" + err := p.Prepare(config) + + if err == nil { + t.Fatal("should have error (only provided elevated_user)") + } + + config["elevated_password"] = "vagrant" + err = p.Prepare(config) + + if err != nil { + t.Fatal("should not have error") + } +} + +func TestProvisionerPrepare_Script(t *testing.T) { + config := testConfig() + delete(config, "inline") + + config["script"] = "/this/should/not/exist" + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good one + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["script"] = tf.Name() + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerPrepare_ScriptAndInline(t *testing.T) { + var p Provisioner + config := testConfig() + + delete(config, "inline") + delete(config, "script") + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with both + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["inline"] = []interface{}{"foo"} + config["script"] = tf.Name() + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_ScriptAndScripts(t *testing.T) { + var p Provisioner + config := testConfig() + + // Test with both + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["inline"] = []interface{}{"foo"} + config["scripts"] = []string{tf.Name()} + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_Scripts(t *testing.T) { + config := testConfig() + delete(config, "inline") + + config["scripts"] = []string{} + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good one + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["scripts"] = []string{tf.Name()} + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerPrepare_EnvironmentVars(t *testing.T) { + config := testConfig() + + // Test with a bad case + 
config["environment_vars"] = []string{"badvar", "good=var"} + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a trickier case + config["environment_vars"] = []string{"=bad"} + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good case + // Note: baz= is a real env variable, just empty + config["environment_vars"] = []string{"FOO=bar", "baz="} + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerQuote_EnvironmentVars(t *testing.T) { + config := testConfig() + + config["environment_vars"] = []string{"keyone=valueone", "keytwo=value\ntwo", "keythree='valuethree'", "keyfour='value\nfour'"} + p := new(Provisioner) + p.Prepare(config) + + expectedValue := "keyone=valueone" + if p.config.Vars[0] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[0], expectedValue) + } + + expectedValue = "keytwo=value\ntwo" + if p.config.Vars[1] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[1], expectedValue) + } + + expectedValue = "keythree='valuethree'" + if p.config.Vars[2] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[2], expectedValue) + } + + expectedValue = "keyfour='value\nfour'" + if p.config.Vars[3] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[3], expectedValue) + } +} + +func testUi() *packer.BasicUi { + return &packer.BasicUi{ + Reader: new(bytes.Buffer), + Writer: new(bytes.Buffer), + ErrorWriter: new(bytes.Buffer), + } +} + +func testObjects() (packer.Ui, packer.Communicator) { + ui := testUi() + return ui, new(packer.MockCommunicator) +} + +func TestProvisionerProvision_ValidExitCodes(t *testing.T) { + config := testConfig() + delete(config, "inline") + + // Defaults provided by Packer + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + config["inline"] = []string{"whoami"} + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + p.config.ValidExitCodes = []int{0, 200} + comm := new(packer.MockCommunicator) + comm.StartExitStatus = 200 + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } +} + +func TestProvisionerProvision_InvalidExitCodes(t *testing.T) { + config := testConfig() + delete(config, "inline") + + // Defaults provided by Packer + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + config["inline"] = []string{"whoami"} + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + p.config.ValidExitCodes = []int{0, 200} + comm := new(packer.MockCommunicator) + comm.StartExitStatus = 201 // Invalid! 
+ p.Prepare(config) + err := p.Provision(ui, comm) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerProvision_Inline(t *testing.T) { + config := testConfig() + delete(config, "inline") + + // Defaults provided by Packer + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + config["inline"] = []string{"whoami"} + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand := `powershell "& { $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; c:/Windows/Temp/inlineScript.bat; exit $LastExitCode}"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + } + + envVars := make([]string, 2) + envVars[0] = "FOO=BAR" + envVars[1] = "BAR=BAZ" + config["environment_vars"] = envVars + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + + p.Prepare(config) + err = p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand = `powershell "& { $env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; c:/Windows/Temp/inlineScript.bat; exit $LastExitCode}"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got: %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisionerProvision_Scripts(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "packer") + defer os.Remove(tempFile.Name()) + config := testConfig() + delete(config, "inline") + config["scripts"] = []string{tempFile.Name()} + config["packer_build_name"] = "foobuild" + config["packer_builder_type"] = "footype" + ui := testUi() + + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + //powershell -Command "$env:PACKER_BUILDER_TYPE=''"; powershell -Command "$env:PACKER_BUILD_NAME='foobuild'"; powershell -Command c:/Windows/Temp/script.ps1 + expectedCommand := `powershell "& { $env:PACKER_BUILDER_TYPE=\"footype\"; $env:PACKER_BUILD_NAME=\"foobuild\"; c:/Windows/Temp/script.ps1; exit $LastExitCode}"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be %s NOT %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "packer") + config := testConfig() + ui := testUi() + defer os.Remove(tempFile.Name()) + delete(config, "inline") + + config["scripts"] = []string{tempFile.Name()} + config["packer_build_name"] = "foobuild" + config["packer_builder_type"] = "footype" + + // Env vars - currently should not effect them + envVars := make([]string, 2) + envVars[0] = "FOO=BAR" + envVars[1] = "BAR=BAZ" + config["environment_vars"] = envVars + + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand := `powershell "& { $env:BAR=\"BAZ\"; $env:FOO=\"BAR\"; $env:PACKER_BUILDER_TYPE=\"footype\"; 
$env:PACKER_BUILD_NAME=\"foobuild\"; c:/Windows/Temp/script.ps1; exit $LastExitCode}"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be %s NOT %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisionerProvision_UISlurp(t *testing.T) { + // UI should be called n times + + // UI should receive following messages / output +} + +func TestProvisioner_createFlattenedElevatedEnvVars_windows(t *testing.T) { + config := testConfig() + + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("should not have error preparing config: %s", err) + } + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + + // no user env var + flattenedEnvVars, err := p.createFlattenedEnvVars(true) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } + + // single user env var + p.config.Vars = []string{"FOO=bar"} + + flattenedEnvVars, err = p.createFlattenedEnvVars(true) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } + + // multiple user env vars + p.config.Vars = []string{"FOO=bar", "BAZ=qux"} + + flattenedEnvVars, err = p.createFlattenedEnvVars(true) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:BAZ=\"qux\"; $env:FOO=\"bar\"; $env:PACKER_BUILDER_TYPE=\"iso\"; $env:PACKER_BUILD_NAME=\"vmware\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } +} + +func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { + config := testConfig() + + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("should not have error preparing config: %s", err) + } + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + + // no user env var + flattenedEnvVars, err := p.createFlattenedEnvVars(false) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:PACKER_BUILDER_TYPE=\\\"iso\\\"; $env:PACKER_BUILD_NAME=\\\"vmware\\\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } + + // single user env var + p.config.Vars = []string{"FOO=bar"} + + flattenedEnvVars, err = p.createFlattenedEnvVars(false) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:FOO=\\\"bar\\\"; $env:PACKER_BUILDER_TYPE=\\\"iso\\\"; $env:PACKER_BUILD_NAME=\\\"vmware\\\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } + + // multiple user env vars + p.config.Vars = []string{"FOO=bar", "BAZ=qux"} + + flattenedEnvVars, err = p.createFlattenedEnvVars(false) + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + if flattenedEnvVars != "$env:BAZ=\\\"qux\\\"; $env:FOO=\\\"bar\\\"; $env:PACKER_BUILDER_TYPE=\\\"iso\\\"; $env:PACKER_BUILD_NAME=\\\"vmware\\\"; " { + t.Fatalf("unexpected flattened env vars: %s", flattenedEnvVars) + } +} + +func 
TestProvision_createCommandText(t *testing.T) { + + config := testConfig() + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.communicator = comm + _ = p.Prepare(config) + + // Non-elevated + cmd, _ := p.createCommandText() + if cmd != "powershell \"& { $env:PACKER_BUILDER_TYPE=\\\"\\\"; $env:PACKER_BUILD_NAME=\\\"\\\"; c:/Windows/Temp/script.ps1; exit $LastExitCode}\"" { + t.Fatalf("Got unexpected non-elevated command: %s", cmd) + } + + // Elevated + p.config.ElevatedUser = "vagrant" + p.config.ElevatedPassword = "vagrant" + cmd, _ = p.createCommandText() + matched, _ := regexp.MatchString("powershell -executionpolicy bypass -file \"%TEMP%(.{1})packer-elevated-shell.*", cmd) + if !matched { + t.Fatalf("Got unexpected elevated command: %s", cmd) + } +} + +func TestProvision_generateElevatedShellRunner(t *testing.T) { + + // Non-elevated + config := testConfig() + p := new(Provisioner) + p.Prepare(config) + comm := new(packer.MockCommunicator) + p.communicator = comm + path, err := p.generateElevatedRunner("whoami") + + if err != nil { + t.Fatalf("Did not expect error: %s", err.Error()) + } + + if comm.UploadCalled != true { + t.Fatalf("Should have uploaded file") + } + + matched, _ := regexp.MatchString("%TEMP%(.{1})packer-elevated-shell.*", path) + if !matched { + t.Fatalf("Got unexpected file: %s", path) + } +} + +func TestRetryable(t *testing.T) { + config := testConfig() + + count := 0 + retryMe := func() error { + t.Logf("RetryMe, attempt number %d", count) + if count == 2 { + return nil + } + count++ + return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count)) + } + retryableSleep = 50 * time.Millisecond + p := new(Provisioner) + p.config.StartRetryTimeout = 155 * time.Millisecond + err := p.Prepare(config) + err = p.retryable(retryMe) + if err != nil { + t.Fatalf("should not have error retrying funuction") + } + + count = 0 + p.config.StartRetryTimeout = 10 * time.Millisecond + err = p.Prepare(config) + err = p.retryable(retryMe) + if err == nil { + t.Fatalf("should have error retrying funuction") + } +} + +func TestCancel(t *testing.T) { + // Don't actually call Cancel() as it performs an os.Exit(0) + // which kills the 'go test' tool +} From 9364809d01bc62a5eba910f7aaa0b466ebaf2e49 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:08:32 -0700 Subject: [PATCH 390/956] website: document powershell --- .../provisioners/powershell.html.markdown | 82 +++++++++++++++++++ website/source/layouts/docs.erb | 1 + 2 files changed, 83 insertions(+) create mode 100644 website/source/docs/provisioners/powershell.html.markdown diff --git a/website/source/docs/provisioners/powershell.html.markdown b/website/source/docs/provisioners/powershell.html.markdown new file mode 100644 index 000000000..69cb90b9a --- /dev/null +++ b/website/source/docs/provisioners/powershell.html.markdown @@ -0,0 +1,82 @@ +--- +layout: "docs" +page_title: "PowerShell Provisioner" +description: |- + The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. +--- + +# PowerShell Provisioner + +Type: `powershell` + +The PowerShell Packer provisioner runs PowerShell scripts on Windows machines. +It assumes that the communicator in use is WinRM. + +## Basic Example + +The example below is fully functional. 
+
+```javascript
+{
+  "type": "powershell",
+  "inline": ["dir c:\\"]
+}
+```
+
+## Configuration Reference
+
+The reference of available configuration options is listed below. The only
+required element is either "inline" or "script". Every other option is optional.
+
+Exactly _one_ of the following is required:
+
+* `inline` (array of strings) - This is an array of commands to execute.
+  The commands are concatenated by newlines and turned into a single file,
+  so they are all executed within the same context. This allows you to
+  change directories in one command and use something in the directory in
+  the next and so on. Inline scripts are the easiest way to pull off simple
+  tasks within the machine.
+
+* `script` (string) - The path to a script to upload and execute in the machine.
+  This path can be absolute or relative. If it is relative, it is relative
+  to the working directory when Packer is executed.
+
+* `scripts` (array of strings) - An array of scripts to execute. The scripts
+  will be uploaded and executed in the order specified. Each script is executed
+  in isolation, so state such as variables from one script won't carry on to
+  the next.
+
+Optional parameters:
+
+* `binary` (boolean) - If true, specifies that the script(s) are binary
+  files, and Packer should therefore not convert Windows line endings to
+  Unix line endings (if there are any). By default this is false.
+
+* `environment_vars` (array of strings) - An array of key/value pairs
+  to inject prior to the execute_command. The format should be
+  `key=value`. Packer injects some environment variables by default
+  into the environment, as well, which are covered in the section below.
+
+* `execute_command` (string) - The command to use to execute the script.
+  By default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`.
+  The value of this is treated as a [configuration template](/docs/templates/configuration-templates.html).
+  There are two available variables: `Path`, which is
+  the path to the script to run, and `Vars`, which is the list of
+  `environment_vars`, if configured.
+
+* `elevated_user` and `elevated_password` (string) - If specified,
+  the PowerShell script will be run with elevated privileges using
+  the given Windows user.
+
+* `remote_path` (string) - The path where the script will be uploaded to
+  in the machine. This defaults to "c:/Windows/Temp/script.ps1". This value
+  must be a writable location and any parent directories must already exist.
+
+* `start_retry_timeout` (string) - The amount of time to attempt to
+  _start_ the remote process. By default this is "5m" or 5 minutes. This
+  setting exists in order to deal with times when WinRM may restart, such as
+  a system reboot. Set this to a higher value if reboots take a longer
+  amount of time.
+
+* `valid_exit_codes` (list of ints) - Valid exit codes for the script.
+  By default this is just 0.
diff --git a/website/source/layouts/docs.erb
index 84cae0b05..2d83962bf 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -49,6 +49,7 @@
    • Provisioners

    • Shell Scripts
    • File Uploads
+   • PowerShell
    • Ansible
    • Chef Client
    • Chef Solo
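
For reference, a minimal sketch of a fuller `powershell` template block exercising the options documented in this patch. The script path and environment value are hypothetical, and the "vagrant"/"vagrant" credentials simply mirror the ones used in the provisioner tests above; 3010 (the Windows "reboot required" code) is shown only as an example of an extra exit code one might allow:

```javascript
{
  "type": "powershell",
  "scripts": ["./scripts/setup.ps1"],
  "environment_vars": ["APP_ENV=staging"],
  "elevated_user": "vagrant",
  "elevated_password": "vagrant",
  "valid_exit_codes": [0, 3010]
}
```
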
    • From 840ddb4f20d2203445c26c90e9192bd20d6fc0dd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:14:47 -0700 Subject: [PATCH 391/956] provisioner/windows-restart --- plugin/provisioner-windows-restart/main.go | 15 + provisioner/windows-restart/provisioner.go | 194 ++++++++++ .../windows-restart/provisioner_test.go | 355 ++++++++++++++++++ 3 files changed, 564 insertions(+) create mode 100644 plugin/provisioner-windows-restart/main.go create mode 100644 provisioner/windows-restart/provisioner.go create mode 100644 provisioner/windows-restart/provisioner_test.go diff --git a/plugin/provisioner-windows-restart/main.go b/plugin/provisioner-windows-restart/main.go new file mode 100644 index 000000000..0adf82216 --- /dev/null +++ b/plugin/provisioner-windows-restart/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/provisioner/windows-restart" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterProvisioner(new(restart.Provisioner)) + server.Serve() +} diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go new file mode 100644 index 000000000..234980183 --- /dev/null +++ b/provisioner/windows-restart/provisioner.go @@ -0,0 +1,194 @@ +package restart + +import ( + "fmt" + "log" + "time" + + "github.com/masterzen/winrm/winrm" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +var DefaultRestartCommand = "shutdown /r /c \"packer restart\" /t 5 && net stop winrm" +var DefaultRestartCheckCommand = winrm.Powershell(`echo "${env:COMPUTERNAME} restarted."`) +var retryableSleep = 5 * time.Second + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + // The command used to restart the guest machine + RestartCommand string `mapstructure:"restart_command"` + + // The command used to check if the guest machine has restarted + // The output of this command will be displayed to the user + RestartCheckCommand string `mapstructure:"restart_check_command"` + + // The timeout for waiting for the machine to restart + RestartTimeout time.Duration `mapstructure:"restart_timeout"` + + ctx interpolate.Context +} + +type Provisioner struct { + config Config + comm packer.Communicator + ui packer.Ui + cancel chan struct{} +} + +func (p *Provisioner) Prepare(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) 
+ if err != nil { + return err + } + + if p.config.RestartCommand == "" { + p.config.RestartCommand = DefaultRestartCommand + } + + if p.config.RestartCheckCommand == "" { + p.config.RestartCheckCommand = DefaultRestartCheckCommand + } + + if p.config.RestartTimeout == 0 { + p.config.RestartTimeout = 5 * time.Minute + } + + return nil +} + +func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + ui.Say("Restarting Machine") + p.comm = comm + p.ui = ui + p.cancel = make(chan struct{}) + + var cmd *packer.RemoteCmd + command := p.config.RestartCommand + err := p.retryable(func() error { + cmd = &packer.RemoteCmd{Command: command} + return cmd.StartWithUi(comm, ui) + }) + + if err != nil { + return err + } + + if cmd.ExitStatus != 0 { + return fmt.Errorf("Restart script exited with non-zero exit status: %d", cmd.ExitStatus) + } + + return waitForRestart(p) +} + +var waitForRestart = func(p *Provisioner) error { + ui := p.ui + ui.Say("Waiting for machine to restart...") + waitDone := make(chan bool, 1) + timeout := time.After(p.config.RestartTimeout) + var err error + + go func() { + log.Printf("Waiting for machine to become available...") + err = waitForCommunicator(p) + waitDone <- true + }() + + log.Printf("Waiting for machine to reboot with timeout: %s", p.config.RestartTimeout) + +WaitLoop: + for { + // Wait for either WinRM to become available, a timeout to occur, + // or an interrupt to come through. + select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for WinRM: %s", err)) + return err + } + + ui.Say("Machine successfully restarted, moving on") + close(p.cancel) + break WaitLoop + case <-timeout: + err := fmt.Errorf("Timeout waiting for WinRM.") + ui.Error(err.Error()) + close(p.cancel) + return err + case <-p.cancel: + close(waitDone) + return fmt.Errorf("Interrupt detected, quitting waiting for machine to restart") + break WaitLoop + } + } + + return nil + +} + +var waitForCommunicator = func(p *Provisioner) error { + cmd := &packer.RemoteCmd{Command: p.config.RestartCheckCommand} + + for { + select { + case <-p.cancel: + log.Println("Communicator wait cancelled, exiting loop") + return fmt.Errorf("Communicator wait cancelled") + case <-time.After(retryableSleep): + } + + log.Printf("Attempting to communicator to machine with: '%s'", cmd.Command) + + err := cmd.StartWithUi(p.comm, p.ui) + if err != nil { + log.Printf("Communication connection err: %s", err) + continue + } + + log.Printf("Connected to machine") + break + } + + return nil +} + +func (p *Provisioner) Cancel() { + log.Printf("Received interrupt Cancel()") + close(p.cancel) +} + +// retryable will retry the given function over and over until a +// non-error is returned. +func (p *Provisioner) retryable(f func() error) error { + startTimeout := time.After(p.config.RestartTimeout) + for { + var err error + if err = f(); err == nil { + return nil + } + + // Create an error and log it + err = fmt.Errorf("Retryable error: %s", err) + log.Printf(err.Error()) + + // Check if we timed out, otherwise we retry. It is safe to + // retry since the only error case above is if the command + // failed to START. 
+ select { + case <-startTimeout: + return err + default: + time.Sleep(retryableSleep) + } + } +} diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go new file mode 100644 index 000000000..f0f2766e3 --- /dev/null +++ b/provisioner/windows-restart/provisioner_test.go @@ -0,0 +1,355 @@ +package restart + +import ( + "bytes" + "errors" + "fmt" + "github.com/mitchellh/packer/packer" + "testing" + "time" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func TestProvisioner_Impl(t *testing.T) { + var raw interface{} + raw = &Provisioner{} + if _, ok := raw.(packer.Provisioner); !ok { + t.Fatalf("must be a Provisioner") + } +} + +func TestProvisionerPrepare_Defaults(t *testing.T) { + var p Provisioner + config := testConfig() + + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.RestartTimeout != 5*time.Minute { + t.Errorf("unexpected remote path: %s", p.config.RestartTimeout) + } + + if p.config.RestartCommand != "shutdown /r /c \"packer restart\" /t 5 && net stop winrm" { + t.Errorf("unexpected remote path: %s", p.config.RestartCommand) + } +} + +func TestProvisionerPrepare_ConfigRetryTimeout(t *testing.T) { + var p Provisioner + config := testConfig() + config["restart_timeout"] = "1m" + + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.RestartTimeout != 1*time.Minute { + t.Errorf("unexpected remote path: %s", p.config.RestartTimeout) + } +} + +func TestProvisionerPrepare_ConfigErrors(t *testing.T) { + var p Provisioner + config := testConfig() + config["restart_timeout"] = "m" + + err := p.Prepare(config) + if err == nil { + t.Fatal("Expected error parsing restart_timeout but did not receive one.") + } +} + +func TestProvisionerPrepare_InvalidKey(t *testing.T) { + var p Provisioner + config := testConfig() + + // Add a random key + config["i_should_not_be_valid"] = true + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func testUi() *packer.BasicUi { + return &packer.BasicUi{ + Reader: new(bytes.Buffer), + Writer: new(bytes.Buffer), + ErrorWriter: new(bytes.Buffer), + } +} + +func TestProvisionerProvision_Success(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + comm := new(packer.MockCommunicator) + p.Prepare(config) + waitForCommunicatorOld := waitForCommunicator + waitForCommunicator = func(p *Provisioner) error { + return nil + } + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand := DefaultRestartCommand + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + } + // Set this back! 
+ waitForCommunicator = waitForCommunicatorOld +} + +func TestProvisionerProvision_CustomCommand(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + expectedCommand := "specialrestart.exe -NOW" + config["restart_command"] = expectedCommand + + // Defaults provided by Packer + comm := new(packer.MockCommunicator) + p.Prepare(config) + waitForCommunicatorOld := waitForCommunicator + waitForCommunicator = func(p *Provisioner) error { + return nil + } + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + } + // Set this back! + waitForCommunicator = waitForCommunicatorOld +} + +func TestProvisionerProvision_RestartCommandFail(t *testing.T) { + config := testConfig() + ui := testUi() + p := new(Provisioner) + comm := new(packer.MockCommunicator) + comm.StartStderr = "WinRM terminated" + comm.StartExitStatus = 1 + + p.Prepare(config) + err := p.Provision(ui, comm) + if err == nil { + t.Fatal("should have error") + } +} +func TestProvisionerProvision_WaitForRestartFail(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + comm := new(packer.MockCommunicator) + p.Prepare(config) + waitForCommunicatorOld := waitForCommunicator + waitForCommunicator = func(p *Provisioner) error { + return fmt.Errorf("Machine did not restart properly") + } + err := p.Provision(ui, comm) + if err == nil { + t.Fatal("should have error") + } + + // Set this back! + waitForCommunicator = waitForCommunicatorOld +} + +func TestProvision_waitForRestartTimeout(t *testing.T) { + retryableSleep = 10 * time.Millisecond + config := testConfig() + config["restart_timeout"] = "1ms" + ui := testUi() + p := new(Provisioner) + comm := new(packer.MockCommunicator) + var err error + + p.Prepare(config) + waitForCommunicatorOld := waitForCommunicator + waitDone := make(chan bool) + + // Block until cancel comes through + waitForCommunicator = func(p *Provisioner) error { + for { + select { + case <-waitDone: + } + } + } + + go func() { + err = p.Provision(ui, comm) + waitDone <- true + }() + <-waitDone + + if err == nil { + t.Fatal("should not have error") + } + + // Set this back! 
+ waitForCommunicator = waitForCommunicatorOld + +} + +func TestProvision_waitForCommunicator(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + comm := new(packer.MockCommunicator) + p.comm = comm + p.ui = ui + comm.StartStderr = "WinRM terminated" + comm.StartExitStatus = 1 + p.Prepare(config) + err := waitForCommunicator(p) + + if err != nil { + t.Fatal("should not have error, got: %s", err.Error()) + } + + expectedCommand := DefaultRestartCheckCommand + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvision_waitForCommunicatorWithCancel(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + comm := new(packer.MockCommunicator) + p.comm = comm + p.ui = ui + retryableSleep = 10 * time.Millisecond + p.cancel = make(chan struct{}) + var err error + + comm.StartStderr = "WinRM terminated" + comm.StartExitStatus = 1 // Always fail + p.Prepare(config) + + // Run 2 goroutines; + // 1st to call waitForCommunicator (that will always fail) + // 2nd to cancel the operation + waitDone := make(chan bool) + go func() { + err = waitForCommunicator(p) + }() + + go func() { + p.Cancel() + waitDone <- true + }() + <-waitDone + + // Expect a Cancel error + if err == nil { + t.Fatalf("Should have err") + } +} + +func TestRetryable(t *testing.T) { + config := testConfig() + + count := 0 + retryMe := func() error { + t.Logf("RetryMe, attempt number %d", count) + if count == 2 { + return nil + } + count++ + return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count)) + } + retryableSleep = 50 * time.Millisecond + p := new(Provisioner) + p.config.RestartTimeout = 155 * time.Millisecond + err := p.Prepare(config) + err = p.retryable(retryMe) + if err != nil { + t.Fatalf("should not have error retrying funuction") + } + + count = 0 + p.config.RestartTimeout = 10 * time.Millisecond + err = p.Prepare(config) + err = p.retryable(retryMe) + if err == nil { + t.Fatalf("should have error retrying funuction") + } +} + +func TestProvision_Cancel(t *testing.T) { + config := testConfig() + + // Defaults provided by Packer + ui := testUi() + p := new(Provisioner) + + var err error + + comm := new(packer.MockCommunicator) + p.Prepare(config) + waitDone := make(chan bool) + + // Block until cancel comes through + waitForCommunicator = func(p *Provisioner) error { + for { + select { + case <-waitDone: + } + } + } + + // Create two go routines to provision and cancel in parallel + // Provision will block until cancel happens + go func() { + err = p.Provision(ui, comm) + waitDone <- true + }() + + go func() { + p.Cancel() + }() + <-waitDone + + // Expect interupt error + if err == nil { + t.Fatal("should have error") + } +} From 506a657775088e57e69769c9e6c66b493937e249 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:17:50 -0700 Subject: [PATCH 392/956] website: doc windows-restart --- .../docs/provisioners/windows-restart.html.md | 43 +++++++++++++++++++ website/source/layouts/docs.erb | 1 + 2 files changed, 44 insertions(+) create mode 100644 website/source/docs/provisioners/windows-restart.html.md diff --git a/website/source/docs/provisioners/windows-restart.html.md b/website/source/docs/provisioners/windows-restart.html.md new file 
mode 100644 index 000000000..a1b65cae1 --- /dev/null +++ b/website/source/docs/provisioners/windows-restart.html.md @@ -0,0 +1,43 @@ +--- +layout: "docs" +page_title: "Windows Restart Provisioner" +description: |- + The Windows restart provisioner restarts a Windows machine and waits for it to come back up. +--- + +# Windows Restart Provisioner + +Type: `windows-restart` + +The Windows restart provisioner initiates a reboot on a Windows machine +and waits for the machine to come back online. + +The Windows provisioning process often requires multiple reboots, and this +provisioner helps to ease that process. + +## Basic Example + +The example below is fully functional. + +```javascript +{ + "type": "windows-restart" +} +``` + +## Configuration Reference + +The reference of available configuration options is listed below. + +Optional parameters: + +* `restart_command` (string) - The command to execute to initiate the + restart. By default this is `shutdown /r /c "packer restart" /t 5 && net stop winrm`. + A key action of this is to stop WinRM so that Packer can detect it + is rebooting. + +* `restart_check_command` (string) - A command to execute to check if the + restart succeeded. This will be done in a loop. + +* `restart_timeout` (string) - The timeout to wait for the restart. + By default this is 5 minutes. Example value: "5m" diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 2d83962bf..8b524b087 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -56,6 +56,7 @@
    • Puppet Masterless
    • Puppet Server
    • Salt
+   • Windows Restart
    • Custom
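
As a hedged illustration of the restart provisioner's optional settings (the values here are arbitrary, not recommendations), a template block might look like:

```javascript
{
  "type": "windows-restart",
  "restart_timeout": "10m",
  "restart_check_command": "powershell -command \"Write-Output 'restarted.'\""
}
```

The check command only needs to succeed over WinRM once the guest is back; as the provisioner code above shows, it is retried in a loop until `restart_timeout` elapses.
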
    From b25b7d1fb22bf187c3740dfab04f1d11ad6ccece Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:19:26 -0700 Subject: [PATCH 393/956] communicator/winrm: log exit code of processes --- communicator/winrm/communicator.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 82686e2a7..804580843 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -89,7 +89,10 @@ func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { go io.Copy(rc.Stderr, cmd.Stderr) cmd.Wait() - rc.SetExited(cmd.ExitCode()) + + code := cmd.ExitCode() + log.Printf("[INFO] command '%s' exited with code: %d", rc.Command, code) + rc.SetExited(code) } // Upload implementation of communicator.Communicator interface From 339a4ccdab01525b1ca36bd8529d9feac71b2103 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:23:29 -0700 Subject: [PATCH 394/956] provisioner/windows-shell --- plugin/provisioner-windows-shell/main.go | 15 + provisioner/windows-shell/provisioner.go | 324 +++++++++++++ provisioner/windows-shell/provisioner_test.go | 441 ++++++++++++++++++ 3 files changed, 780 insertions(+) create mode 100644 plugin/provisioner-windows-shell/main.go create mode 100644 provisioner/windows-shell/provisioner.go create mode 100644 provisioner/windows-shell/provisioner_test.go diff --git a/plugin/provisioner-windows-shell/main.go b/plugin/provisioner-windows-shell/main.go new file mode 100644 index 000000000..342a8ed9b --- /dev/null +++ b/plugin/provisioner-windows-shell/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/provisioner/windows-shell" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterProvisioner(new(shell.Provisioner)) + server.Serve() +} diff --git a/provisioner/windows-shell/provisioner.go b/provisioner/windows-shell/provisioner.go new file mode 100644 index 000000000..50c0aaeb1 --- /dev/null +++ b/provisioner/windows-shell/provisioner.go @@ -0,0 +1,324 @@ +// This package implements a provisioner for Packer that executes +// shell scripts within the remote machine. +package shell + +import ( + "bufio" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "sort" + "strings" + "time" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +const DefaultRemotePath = "c:/Windows/Temp/script.bat" + +var retryableSleep = 2 * time.Second + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + // If true, the script contains binary and line endings will not be + // converted from Windows to Unix-style. + Binary bool + + // An inline script to execute. Multiple strings are all executed + // in the context of a single shell. + Inline []string + + // The local path of the shell script to upload and execute. + Script string + + // An array of multiple scripts to run. + Scripts []string + + // An array of environment variables that will be injected before + // your command(s) are executed. + Vars []string `mapstructure:"environment_vars"` + + // The remote path where the local shell script will be uploaded to. + // This should be set to a writable file that is in a pre-existing directory. 
+ RemotePath string `mapstructure:"remote_path"` + + // The command used to execute the script. The '{{ .Path }}' variable + // should be used to specify where the script goes, {{ .Vars }} + // can be used to inject the environment_vars into the environment. + ExecuteCommand string `mapstructure:"execute_command"` + + // The timeout for retrying to start the process. Until this timeout + // is reached, if the provisioner can't start a process, it retries. + // This can be set high to allow for reboots. + StartRetryTimeout time.Duration `mapstructure:"start_retry_timeout"` + + // This is used in the template generation to format environment variables + // inside the `ExecuteCommand` template. + EnvVarFormat string + + ctx interpolate.Context +} + +type Provisioner struct { + config Config +} + +type ExecuteCommandTemplate struct { + Vars string + Path string +} + +func (p *Provisioner) Prepare(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) + if err != nil { + return err + } + + if p.config.EnvVarFormat == "" { + p.config.EnvVarFormat = `set "%s=%s" && ` + } + + if p.config.ExecuteCommand == "" { + p.config.ExecuteCommand = `{{.Vars}}"{{.Path}}"` + } + + if p.config.Inline != nil && len(p.config.Inline) == 0 { + p.config.Inline = nil + } + + if p.config.StartRetryTimeout == 0 { + p.config.StartRetryTimeout = 5 * time.Minute + } + + if p.config.RemotePath == "" { + p.config.RemotePath = DefaultRemotePath + } + + if p.config.Scripts == nil { + p.config.Scripts = make([]string, 0) + } + + if p.config.Vars == nil { + p.config.Vars = make([]string, 0) + } + + var errs error + if p.config.Script != "" && len(p.config.Scripts) > 0 { + errs = packer.MultiErrorAppend(errs, + errors.New("Only one of script or scripts can be specified.")) + } + + if p.config.Script != "" { + p.config.Scripts = []string{p.config.Script} + } + + if len(p.config.Scripts) == 0 && p.config.Inline == nil { + errs = packer.MultiErrorAppend(errs, + errors.New("Either a script file or inline script must be specified.")) + } else if len(p.config.Scripts) > 0 && p.config.Inline != nil { + errs = packer.MultiErrorAppend(errs, + errors.New("Only a script file or an inline script can be specified, not both.")) + } + + for _, path := range p.config.Scripts { + if _, err := os.Stat(path); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad script '%s': %s", path, err)) + } + } + + // Do a check for bad environment variables, such as '=foo', 'foobar' + for _, kv := range p.config.Vars { + vs := strings.SplitN(kv, "=", 2) + if len(vs) != 2 || vs[0] == "" { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) + } + } + + if errs != nil { + return errs + } + + return nil +} + +// This function takes the inline scripts, concatenates them +// into a temporary file and returns a string containing the location +// of said file. 
+func extractScript(p *Provisioner) (string, error) { + temp, err := ioutil.TempFile(os.TempDir(), "packer-windows-shell-provisioner") + if err != nil { + log.Printf("Unable to create temporary file for inline scripts: %s", err) + return "", err + } + writer := bufio.NewWriter(temp) + for _, command := range p.config.Inline { + log.Printf("Found command: %s", command) + if _, err := writer.WriteString(command + "\n"); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + } + + if err := writer.Flush(); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + + temp.Close() + + return temp.Name(), nil +} + +func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + ui.Say(fmt.Sprintf("Provisioning with windows-shell...")) + scripts := make([]string, len(p.config.Scripts)) + copy(scripts, p.config.Scripts) + + // Build our variables up by adding in the build name and builder type + envVars := make([]string, len(p.config.Vars)+2) + envVars[0] = "PACKER_BUILD_NAME=" + p.config.PackerBuildName + envVars[1] = "PACKER_BUILDER_TYPE=" + p.config.PackerBuilderType + + copy(envVars, p.config.Vars) + + if p.config.Inline != nil { + temp, err := extractScript(p) + if err != nil { + ui.Error(fmt.Sprintf("Unable to extract inline scripts into a file: %s", err)) + } + scripts = append(scripts, temp) + } + + for _, path := range scripts { + ui.Say(fmt.Sprintf("Provisioning with shell script: %s", path)) + + log.Printf("Opening %s for reading", path) + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("Error opening shell script: %s", err) + } + defer f.Close() + + // Create environment variables to set before executing the command + flattendVars, err := p.createFlattenedEnvVars() + if err != nil { + return err + } + + // Compile the command + p.config.ctx.Data = &ExecuteCommandTemplate{ + Vars: flattendVars, + Path: p.config.RemotePath, + } + command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + if err != nil { + return fmt.Errorf("Error processing command: %s", err) + } + + // Upload the file and run the command. Do this in the context of + // a single retryable function so that we don't end up with + // the case that the upload succeeded, a restart is initiated, + // and then the command is executed but the file doesn't exist + // any longer. + var cmd *packer.RemoteCmd + err = p.retryable(func() error { + if _, err := f.Seek(0, 0); err != nil { + return err + } + + if err := comm.Upload(p.config.RemotePath, f, nil); err != nil { + return fmt.Errorf("Error uploading script: %s", err) + } + + cmd = &packer.RemoteCmd{Command: command} + return cmd.StartWithUi(comm, ui) + }) + if err != nil { + return err + } + + // Close the original file since we copied it + f.Close() + + if cmd.ExitStatus != 0 { + return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) + } + } + + return nil +} + +func (p *Provisioner) Cancel() { + // Just hard quit. It isn't a big deal if what we're doing keeps + // running on the other side. + os.Exit(0) +} + +// retryable will retry the given function over and over until a +// non-error is returned. +func (p *Provisioner) retryable(f func() error) error { + startTimeout := time.After(p.config.StartRetryTimeout) + for { + var err error + if err = f(); err == nil { + return nil + } + + // Create an error and log it + err = fmt.Errorf("Retryable error: %s", err) + log.Printf(err.Error()) + + // Check if we timed out, otherwise we retry. 
It is safe to + // retry since the only error case above is if the command + // failed to START. + select { + case <-startTimeout: + return err + default: + time.Sleep(retryableSleep) + } + } +} + +func (p *Provisioner) createFlattenedEnvVars() (flattened string, err error) { + flattened = "" + envVars := make(map[string]string) + + // Always available Packer provided env vars + envVars["PACKER_BUILD_NAME"] = p.config.PackerBuildName + envVars["PACKER_BUILDER_TYPE"] = p.config.PackerBuilderType + + // Split vars into key/value components + for _, envVar := range p.config.Vars { + keyValue := strings.Split(envVar, "=") + if len(keyValue) != 2 { + err = errors.New("Shell provisioner environment variables must be in key=value format") + return + } + envVars[keyValue[0]] = keyValue[1] + } + // Create a list of env var keys in sorted order + var keys []string + for k := range envVars { + keys = append(keys, k) + } + sort.Strings(keys) + // Re-assemble vars using OS specific format pattern and flatten + for _, key := range keys { + flattened += fmt.Sprintf(p.config.EnvVarFormat, key, envVars[key]) + } + return +} diff --git a/provisioner/windows-shell/provisioner_test.go b/provisioner/windows-shell/provisioner_test.go new file mode 100644 index 000000000..5c4dddd90 --- /dev/null +++ b/provisioner/windows-shell/provisioner_test.go @@ -0,0 +1,441 @@ +package shell + +import ( + "bytes" + "errors" + "fmt" + "github.com/mitchellh/packer/packer" + "io/ioutil" + "log" + "os" + "strings" + "testing" + "time" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "inline": []interface{}{"foo", "bar"}, + } +} + +func TestProvisionerPrepare_extractScript(t *testing.T) { + config := testConfig() + p := new(Provisioner) + _ = p.Prepare(config) + file, err := extractScript(p) + if err != nil { + t.Fatalf("Should not be error: %s", err) + } + log.Printf("File: %s", file) + if strings.Index(file, os.TempDir()) != 0 { + t.Fatalf("Temp file should reside in %s. 
File location: %s", os.TempDir(), file) + } + + // File contents should contain 2 lines concatenated by newlines: foo\nbar + readFile, err := ioutil.ReadFile(file) + expectedContents := "foo\nbar\n" + s := string(readFile[:]) + if s != expectedContents { + t.Fatalf("Expected generated inlineScript to equal '%s', got '%s'", expectedContents, s) + } +} + +func TestProvisioner_Impl(t *testing.T) { + var raw interface{} + raw = &Provisioner{} + if _, ok := raw.(packer.Provisioner); !ok { + t.Fatalf("must be a Provisioner") + } +} + +func TestProvisionerPrepare_Defaults(t *testing.T) { + var p Provisioner + config := testConfig() + + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.RemotePath != DefaultRemotePath { + t.Errorf("unexpected remote path: %s", p.config.RemotePath) + } + + if p.config.ExecuteCommand != "{{.Vars}}\"{{.Path}}\"" { + t.Fatalf("Default command should be powershell {{.Vars}}\"{{.Path}}\", but got %s", p.config.ExecuteCommand) + } +} + +func TestProvisionerPrepare_Config(t *testing.T) { + +} + +func TestProvisionerPrepare_InvalidKey(t *testing.T) { + var p Provisioner + config := testConfig() + + // Add a random key + config["i_should_not_be_valid"] = true + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_Script(t *testing.T) { + config := testConfig() + delete(config, "inline") + + config["script"] = "/this/should/not/exist" + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good one + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["script"] = tf.Name() + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerPrepare_ScriptAndInline(t *testing.T) { + var p Provisioner + config := testConfig() + + delete(config, "inline") + delete(config, "script") + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with both + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["inline"] = []interface{}{"foo"} + config["script"] = tf.Name() + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_ScriptAndScripts(t *testing.T) { + var p Provisioner + config := testConfig() + + // Test with both + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["inline"] = []interface{}{"foo"} + config["scripts"] = []string{tf.Name()} + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } +} + +func TestProvisionerPrepare_Scripts(t *testing.T) { + config := testConfig() + delete(config, "inline") + + config["scripts"] = []string{} + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good one + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("error tempfile: %s", err) + } + defer os.Remove(tf.Name()) + + config["scripts"] = []string{tf.Name()} + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerPrepare_EnvironmentVars(t *testing.T) { + config := testConfig() + + // Test with a bad 
case + config["environment_vars"] = []string{"badvar", "good=var"} + p := new(Provisioner) + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a trickier case + config["environment_vars"] = []string{"=bad"} + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + // Test with a good case + // Note: baz= is a real env variable, just empty + config["environment_vars"] = []string{"FOO=bar", "baz="} + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("should not have error: %s", err) + } +} + +func TestProvisionerQuote_EnvironmentVars(t *testing.T) { + config := testConfig() + + config["environment_vars"] = []string{"keyone=valueone", "keytwo=value\ntwo", "keythree='valuethree'", "keyfour='value\nfour'"} + p := new(Provisioner) + p.Prepare(config) + + expectedValue := "keyone=valueone" + if p.config.Vars[0] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[0], expectedValue) + } + + expectedValue = "keytwo=value\ntwo" + if p.config.Vars[1] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[1], expectedValue) + } + + expectedValue = "keythree='valuethree'" + if p.config.Vars[2] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[2], expectedValue) + } + + expectedValue = "keyfour='value\nfour'" + if p.config.Vars[3] != expectedValue { + t.Fatalf("%s should be equal to %s", p.config.Vars[3], expectedValue) + } +} + +func testUi() *packer.BasicUi { + return &packer.BasicUi{ + Reader: new(bytes.Buffer), + Writer: new(bytes.Buffer), + ErrorWriter: new(bytes.Buffer), + } +} + +func testObjects() (packer.Ui, packer.Communicator) { + ui := testUi() + return ui, new(packer.MockCommunicator) +} + +func TestProvisionerProvision_Inline(t *testing.T) { + config := testConfig() + delete(config, "inline") + + // Defaults provided by Packer + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + config["inline"] = []string{"whoami"} + ui := testUi() + p := new(Provisioner) + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand := `set "PACKER_BUILDER_TYPE=iso" && set "PACKER_BUILD_NAME=vmware" && "c:/Windows/Temp/inlineScript.bat"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got %s", expectedCommand, comm.StartCmd.Command) + } + + envVars := make([]string, 2) + envVars[0] = "FOO=BAR" + envVars[1] = "BAR=BAZ" + config["environment_vars"] = envVars + config["remote_path"] = "c:/Windows/Temp/inlineScript.bat" + + p.Prepare(config) + err = p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand = `set "BAR=BAZ" && set "FOO=BAR" && set "PACKER_BUILDER_TYPE=iso" && set "PACKER_BUILD_NAME=vmware" && "c:/Windows/Temp/inlineScript.bat"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be: %s, got: %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisionerProvision_Scripts(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "packer") + defer os.Remove(tempFile.Name()) + config := testConfig() + delete(config, "inline") + config["scripts"] = []string{tempFile.Name()} + 
config["packer_build_name"] = "foobuild" + config["packer_builder_type"] = "footype" + ui := testUi() + + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + //powershell -Command "$env:PACKER_BUILDER_TYPE=''"; powershell -Command "$env:PACKER_BUILD_NAME='foobuild'"; powershell -Command c:/Windows/Temp/script.ps1 + expectedCommand := `set "PACKER_BUILDER_TYPE=footype" && set "PACKER_BUILD_NAME=foobuild" && "c:/Windows/Temp/script.bat"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be %s NOT %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisionerProvision_ScriptsWithEnvVars(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "packer") + config := testConfig() + ui := testUi() + defer os.Remove(tempFile.Name()) + delete(config, "inline") + + config["scripts"] = []string{tempFile.Name()} + config["packer_build_name"] = "foobuild" + config["packer_builder_type"] = "footype" + + // Env vars - currently should not effect them + envVars := make([]string, 2) + envVars[0] = "FOO=BAR" + envVars[1] = "BAR=BAZ" + config["environment_vars"] = envVars + + p := new(Provisioner) + comm := new(packer.MockCommunicator) + p.Prepare(config) + err := p.Provision(ui, comm) + if err != nil { + t.Fatal("should not have error") + } + + expectedCommand := `set "BAR=BAZ" && set "FOO=BAR" && set "PACKER_BUILDER_TYPE=footype" && set "PACKER_BUILD_NAME=foobuild" && "c:/Windows/Temp/script.bat"` + + // Should run the command without alteration + if comm.StartCmd.Command != expectedCommand { + t.Fatalf("Expect command to be %s NOT %s", expectedCommand, comm.StartCmd.Command) + } +} + +func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) { + config := testConfig() + + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("should not have error preparing config: %s", err) + } + + // Defaults provided by Packer + p.config.PackerBuildName = "vmware" + p.config.PackerBuilderType = "iso" + + // no user env var + flattenedEnvVars, err := p.createFlattenedEnvVars() + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + expectedEnvVars := `set "PACKER_BUILDER_TYPE=iso" && set "PACKER_BUILD_NAME=vmware" && ` + if flattenedEnvVars != expectedEnvVars { + t.Fatalf("expected flattened env vars to be: %s, got: %s", expectedEnvVars, flattenedEnvVars) + } + + // single user env var + p.config.Vars = []string{"FOO=bar"} + + flattenedEnvVars, err = p.createFlattenedEnvVars() + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + expectedEnvVars = `set "FOO=bar" && set "PACKER_BUILDER_TYPE=iso" && set "PACKER_BUILD_NAME=vmware" && ` + if flattenedEnvVars != expectedEnvVars { + t.Fatalf("expected flattened env vars to be: %s, got: %s", expectedEnvVars, flattenedEnvVars) + } + + // multiple user env vars + p.config.Vars = []string{"FOO=bar", "BAZ=qux"} + + flattenedEnvVars, err = p.createFlattenedEnvVars() + if err != nil { + t.Fatalf("should not have error creating flattened env vars: %s", err) + } + expectedEnvVars = `set "BAZ=qux" && set "FOO=bar" && set "PACKER_BUILDER_TYPE=iso" && set "PACKER_BUILD_NAME=vmware" && ` + if flattenedEnvVars != expectedEnvVars { + t.Fatalf("expected flattened env vars to be: %s, got: %s", expectedEnvVars, flattenedEnvVars) + } +} + +func TestRetryable(t *testing.T) { + 
config := testConfig() + + count := 0 + retryMe := func() error { + log.Printf("RetryMe, attempt number %d", count) + if count == 2 { + return nil + } + count++ + return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count)) + } + retryableSleep = 50 * time.Millisecond + p := new(Provisioner) + p.config.StartRetryTimeout = 155 * time.Millisecond + err := p.Prepare(config) + err = p.retryable(retryMe) + if err != nil { + t.Fatalf("should not have error retrying funuction") + } + + count = 0 + p.config.StartRetryTimeout = 10 * time.Millisecond + err = p.Prepare(config) + err = p.retryable(retryMe) + if err == nil { + t.Fatalf("should have error retrying funuction") + } +} + +func TestCancel(t *testing.T) { + // Don't actually call Cancel() as it performs an os.Exit(0) + // which kills the 'go test' tool +} From e1530c39dc120211612e19c50e541bfaea117350 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 11:27:48 -0700 Subject: [PATCH 395/956] website: windows-shell --- .../docs/provisioners/shell.html.markdown | 4 + .../docs/provisioners/windows-shell.html.md | 75 +++++++++++++++++++ website/source/layouts/docs.erb | 1 + 3 files changed, 80 insertions(+) create mode 100644 website/source/docs/provisioners/windows-shell.html.md diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index 89a442a83..75bf5e7d1 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -13,6 +13,10 @@ The shell Packer provisioner provisions machines built by Packer using shell scr Shell provisioning is the easiest way to get software installed and configured on a machine. +-> **Building Windows images?** You probably want to use the +[PowerShell](/docs/provisioners/powershell.html) or +[Windows Shell](/docs/provisioners/windows-shell.html) provisioners. + ## Basic Example The example below is fully functional. diff --git a/website/source/docs/provisioners/windows-shell.html.md b/website/source/docs/provisioners/windows-shell.html.md new file mode 100644 index 000000000..c758a5ebd --- /dev/null +++ b/website/source/docs/provisioners/windows-shell.html.md @@ -0,0 +1,75 @@ +--- +layout: "docs" +page_title: "Windows Shell Provisioner" +description: |- + The windows-shell Packer provisioner runs commands on Windows using the cmd shell. +--- + +# Windows Shell Provisioner + +Type: `windows-shell` + +The windows-shell Packer provisioner runs commands on a Windows machine +using `cmd`. It assumes it is running over WinRM. + +## Basic Example + +The example below is fully functional. + +```javascript +{ + "type": "windows-shell", + "inline": ["dir c:\\"] +} +``` + +## Configuration Reference + +The reference of available configuration options is listed below. The only +required element is either "inline" or "script". Every other option is optional. + +Exactly _one_ of the following is required: + +* `inline` (array of strings) - This is an array of commands to execute. + The commands are concatenated by newlines and turned into a single file, + so they are all executed within the same context. This allows you to + change directories in one command and use something in the directory in + the next and so on. Inline scripts are the easiest way to pull off simple + tasks within the machine. + +* `script` (string) - The path to a script to upload and execute in the machine. + This path can be absolute or relative. 
If it is relative, it is relative
+  to the working directory when Packer is executed.
+
+* `scripts` (array of strings) - An array of scripts to execute. The scripts
+  will be uploaded and executed in the order specified. Each script is executed
+  in isolation, so state such as variables from one script won't carry on to
+  the next.
+
+Optional parameters:
+
+* `binary` (boolean) - If true, specifies that the script(s) are binary
+  files, and Packer should therefore not convert Windows line endings to
+  Unix line endings (if there are any). By default this is false.
+
+* `environment_vars` (array of strings) - An array of key/value pairs
+  to inject prior to the execute_command. The format should be
+  `key=value`. Packer injects some environment variables by default
+  into the environment, as well, which are covered in the section below.
+
+* `execute_command` (string) - The command to use to execute the script.
+  By default this is `{{ .Vars }}"{{ .Path }}"`. The value of this is
+  treated as a [configuration template](/docs/templates/configuration-templates.html).
+  There are two available variables: `Path`, which is
+  the path to the script to run, and `Vars`, which is the list of
+  `environment_vars`, if configured.
+
+* `remote_path` (string) - The path where the script will be uploaded to
+  in the machine. This defaults to "c:/Windows/Temp/script.bat". This value
+  must be a writable location and any parent directories must already exist.
+
+* `start_retry_timeout` (string) - The amount of time to attempt to
+  _start_ the remote process. By default this is "5m" or 5 minutes. This
+  setting exists in order to deal with times when WinRM may restart, such as
+  a system reboot. Set this to a higher value if reboots take a longer
+  amount of time.
diff --git a/website/source/layouts/docs.erb
index 8b524b087..8099b461a 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -50,6 +50,7 @@
  • Shell Scripts
  • File Uploads
  • PowerShell
+ • Windows Shell
  • Ansible
  • Chef Client
  • Chef Solo
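
A short, hedged `windows-shell` example combining the options above (the script path, remote path, and variable are illustrative only):

```javascript
{
  "type": "windows-shell",
  "scripts": ["./scripts/install.bat"],
  "environment_vars": ["APP_ENV=staging"],
  "remote_path": "c:/Windows/Temp/install.bat"
}
```

With the default execute_command, the Packer-provided PACKER_BUILD_NAME and PACKER_BUILDER_TYPE variables are injected alongside APP_ENV via `set "key=value" &&` pairs in sorted key order before the uploaded script runs, as the provisioner tests above demonstrate.
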
  • From ab6a330d86cbe2f0785c8d7eaf7b8529141bde63 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 12:39:39 -0700 Subject: [PATCH 396/956] provisioner/*: fix go vet --- provisioner/powershell/provisioner.go | 4 +++- provisioner/windows-restart/provisioner_test.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 0c2454d0d..b31781d88 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -294,7 +294,9 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } } if !validExitCode { - return fmt.Errorf("Script exited with non-zero exit status: %d. Allowed exit codes are: %s", cmd.ExitStatus, p.config.ValidExitCodes) + return fmt.Errorf( + "Script exited with non-zero exit status: %d. Allowed exit codes are: %v", + cmd.ExitStatus, p.config.ValidExitCodes) } } diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go index f0f2766e3..d2a54d274 100644 --- a/provisioner/windows-restart/provisioner_test.go +++ b/provisioner/windows-restart/provisioner_test.go @@ -234,7 +234,7 @@ func TestProvision_waitForCommunicator(t *testing.T) { err := waitForCommunicator(p) if err != nil { - t.Fatal("should not have error, got: %s", err.Error()) + t.Fatalf("should not have error, got: %s", err.Error()) } expectedCommand := DefaultRestartCheckCommand From 7fc69828c51f3a825b194ba49627883fc76671fb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 21:47:53 -0700 Subject: [PATCH 397/956] builder/virtualbox: fix forwarding to work with WinRM --- builder/virtualbox/common/step_forward_ssh.go | 19 +++++++++++-------- builder/virtualbox/iso/builder.go | 2 +- builder/virtualbox/ovf/builder.go | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/builder/virtualbox/common/step_forward_ssh.go b/builder/virtualbox/common/step_forward_ssh.go index fe6004281..11bbcff9f 100644 --- a/builder/virtualbox/common/step_forward_ssh.go +++ b/builder/virtualbox/common/step_forward_ssh.go @@ -2,11 +2,13 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "math/rand" "net" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/helper/communicator" + "github.com/mitchellh/packer/packer" ) // This step adds a NAT port forwarding definition so that SSH is available @@ -19,7 +21,7 @@ import ( // // Produces: type StepForwardSSH struct { - GuestPort uint + CommConfig *communicator.Config HostPortMin uint HostPortMax uint SkipNatMapping bool @@ -30,20 +32,21 @@ func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) - sshHostPort := s.GuestPort + guestPort := s.CommConfig.Port() + sshHostPort := guestPort if !s.SkipNatMapping { log.Printf("Looking for available SSH port between %d and %d", s.HostPortMin, s.HostPortMax) - var offset uint = 0 + offset := 0 portRange := int(s.HostPortMax - s.HostPortMin) if portRange > 0 { // Have to check if > 0 to avoid a panic - offset = uint(rand.Intn(portRange)) + offset = rand.Intn(portRange) } for { - sshHostPort = offset + s.HostPortMin + sshHostPort = offset + int(s.HostPortMin) log.Printf("Trying port: %d", sshHostPort) l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort)) if err == nil { @@ -57,7 +60,7 @@ func (s *StepForwardSSH) 
Run(state multistep.StateBag) multistep.StepAction { command := []string{ "modifyvm", vmName, "--natpf1", - fmt.Sprintf("packerssh,tcp,127.0.0.1,%d,,%d", sshHostPort, s.GuestPort), + fmt.Sprintf("packerssh,tcp,127.0.0.1,%d,,%d", sshHostPort, guestPort), } if err := driver.VBoxManage(command...); err != nil { err := fmt.Errorf("Error creating port forwarding rule: %s", err) diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index f37c65c5a..dae107170 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -254,7 +254,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(vboxcommon.StepAttachFloppy), &vboxcommon.StepForwardSSH{ - GuestPort: uint(b.config.SSHConfig.Comm.SSHPort), + CommConfig: &b.config.SSHConfig.Comm, HostPortMin: b.config.SSHHostPortMin, HostPortMax: b.config.SSHHostPortMax, SkipNatMapping: b.config.SSHSkipNatMapping, diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index c35f2a50f..8b9932d54 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -83,7 +83,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, new(vboxcommon.StepAttachFloppy), &vboxcommon.StepForwardSSH{ - GuestPort: uint(b.config.SSHConfig.Comm.SSHPort), + CommConfig: &b.config.SSHConfig.Comm, HostPortMin: b.config.SSHHostPortMin, HostPortMax: b.config.SSHHostPortMax, SkipNatMapping: b.config.SSHSkipNatMapping, From cab2665119a0253448824e6b0ae4363bf6026cbf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 14 Jun 2015 22:09:38 -0700 Subject: [PATCH 398/956] builder/docker: support custom communicators --- builder/docker/builder.go | 11 +++- builder/docker/comm.go | 52 +++++++++++++++++++ builder/docker/config.go | 10 ++++ builder/docker/driver.go | 4 ++ builder/docker/driver_docker.go | 17 ++++++ builder/docker/driver_mock.go | 11 ++++ ...ep_provision.go => step_connect_docker.go} | 11 ++-- helper/communicator/step_connect.go | 8 +++ 8 files changed, 117 insertions(+), 7 deletions(-) create mode 100644 builder/docker/comm.go rename builder/docker/{step_provision.go => step_connect_docker.go} (68%) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 96a79b02d..89880aacc 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -5,6 +5,7 @@ import ( "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/packer" ) @@ -42,7 +43,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepTempDir{}, &StepPull{}, &StepRun{}, - &StepProvision{}, + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: commHost, + SSHConfig: sshConfig(&b.config.Comm), + CustomConnect: map[string]multistep.Step{ + "docker": &StepConnectDocker{}, + }, + }, + &common.StepProvision{}, } if b.config.Commit { diff --git a/builder/docker/comm.go b/builder/docker/comm.go new file mode 100644 index 000000000..a42d12525 --- /dev/null +++ b/builder/docker/comm.go @@ -0,0 +1,52 @@ +package docker + +import ( + "fmt" + "io/ioutil" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/communicator/ssh" + "github.com/mitchellh/packer/helper/communicator" + gossh "golang.org/x/crypto/ssh" +) + +func commHost(state multistep.StateBag) (string, error) { + containerId := state.Get("container_id").(string) + driver := state.Get("driver").(Driver) + return 
driver.IPAddress(containerId) +} + +func sshConfig(comm *communicator.Config) func(state multistep.StateBag) (*gossh.ClientConfig, error) { + return func(state multistep.StateBag) (*gossh.ClientConfig, error) { + if comm.SSHPrivateKey != "" { + // key based auth + bytes, err := ioutil.ReadFile(comm.SSHPrivateKey) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + privateKey := string(bytes) + + signer, err := gossh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + return &gossh.ClientConfig{ + User: comm.SSHUsername, + Auth: []gossh.AuthMethod{ + gossh.PublicKeys(signer), + }, + }, nil + } else { + // password based auth + return &gossh.ClientConfig{ + User: comm.SSHUsername, + Auth: []gossh.AuthMethod{ + gossh.Password(comm.SSHPassword), + gossh.KeyboardInteractive( + ssh.PasswordKeyboardInteractive(comm.SSHPassword)), + }, + }, nil + } + } +} diff --git a/builder/docker/config.go b/builder/docker/config.go index d5801c8ba..34fda4309 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/mapstructure" "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/communicator" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" @@ -13,6 +14,7 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` Commit bool ExportPath string `mapstructure:"export_path"` @@ -69,7 +71,15 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.Pull = true } + // Default to the normal Docker type + if c.Comm.Type == "" { + c.Comm.Type = "docker" + } + var errs *packer.MultiError + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) + } if c.Image == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("image must be specified")) diff --git a/builder/docker/driver.go b/builder/docker/driver.go index 7c9e6b868..d88c71022 100644 --- a/builder/docker/driver.go +++ b/builder/docker/driver.go @@ -22,6 +22,10 @@ type Driver interface { // Import imports a container from a tar file Import(path, repo string) (string, error) + // IPAddress returns the address of the container that can be used + // for external access. + IPAddress(id string) (string, error) + // Login. This will lock the driver from performing another Login // until Logout is called. Therefore, any users MUST call Logout. 
Login(repo, email, username, password string) error diff --git a/builder/docker/driver_docker.go b/builder/docker/driver_docker.go index ea895d362..0d406b1fa 100644 --- a/builder/docker/driver_docker.go +++ b/builder/docker/driver_docker.go @@ -116,6 +116,23 @@ func (d *DockerDriver) Import(path string, repo string) (string, error) { return strings.TrimSpace(stdout.String()), nil } +func (d *DockerDriver) IPAddress(id string) (string, error) { + var stderr, stdout bytes.Buffer + cmd := exec.Command( + "docker", + "inspect", + "--format", + "{{ .NetworkSettings.IPAddress }}", + id) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("Error: %s\n\nStderr: %s", err, stderr.String()) + } + + return strings.TrimSpace(stdout.String()), nil +} + func (d *DockerDriver) Login(repo, email, user, pass string) error { d.l.Lock() diff --git a/builder/docker/driver_mock.go b/builder/docker/driver_mock.go index 420856742..b0170f85f 100644 --- a/builder/docker/driver_mock.go +++ b/builder/docker/driver_mock.go @@ -23,6 +23,11 @@ type MockDriver struct { ImportId string ImportErr error + IPAddressCalled bool + IPAddressID string + IPAddressResult string + IPAddressErr error + LoginCalled bool LoginEmail string LoginUsername string @@ -104,6 +109,12 @@ func (d *MockDriver) Import(path, repo string) (string, error) { return d.ImportId, d.ImportErr } +func (d *MockDriver) IPAddress(id string) (string, error) { + d.IPAddressCalled = true + d.IPAddressID = id + return d.IPAddressResult, d.IPAddressErr +} + func (d *MockDriver) Login(r, e, u, p string) error { d.LoginCalled = true d.LoginRepo = r diff --git a/builder/docker/step_provision.go b/builder/docker/step_connect_docker.go similarity index 68% rename from builder/docker/step_provision.go rename to builder/docker/step_connect_docker.go index d9852ae2b..31f2ea2e4 100644 --- a/builder/docker/step_provision.go +++ b/builder/docker/step_connect_docker.go @@ -2,12 +2,11 @@ package docker import ( "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/common" ) -type StepProvision struct{} +type StepConnectDocker struct{} -func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { +func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { containerId := state.Get("container_id").(string) driver := state.Get("driver").(Driver) tempDir := state.Get("temp_dir").(string) @@ -28,8 +27,8 @@ func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { Version: version, } - prov := common.StepProvision{Comm: comm} - return prov.Run(state) + state.Put("communicator", comm) + return multistep.ActionContinue } -func (s *StepProvision) Cleanup(state multistep.StateBag) {} +func (s *StepConnectDocker) Cleanup(state multistep.StateBag) {} diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go index ce77333e1..0c1522330 100644 --- a/helper/communicator/step_connect.go +++ b/helper/communicator/step_connect.go @@ -32,6 +32,11 @@ type StepConnect struct { // connecting via WinRM. WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + // CustomConnect can be set to have custom connectors for specific + // types. These take highest precedence so you can also override + // existing types. 
+ CustomConnect map[string]multistep.Step + substep multistep.Step } @@ -50,6 +55,9 @@ func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { WinRMConfig: s.WinRMConfig, }, } + for k, v := range s.CustomConnect { + typeMap[k] = v + } step, ok := typeMap[s.Config.Type] if !ok { From 382fa01e6f660a2e224fe300d3d4c0d4874fbf1b Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Mon, 15 Jun 2015 09:43:25 +0300 Subject: [PATCH 399/956] resume download after fail close #2106 Signed-off-by: Vasiliy Tolstov --- common/download.go | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/common/download.go b/common/download.go index b5798b76c..d6711b6b4 100644 --- a/common/download.go +++ b/common/download.go @@ -89,7 +89,7 @@ func NewDownloadClient(c *DownloadConfig) *DownloadClient { // downloading it. type Downloader interface { Cancel() - Download(io.Writer, *url.URL) error + Download(*os.File, *url.URL) error Progress() uint Total() uint } @@ -99,6 +99,8 @@ func (d *DownloadClient) Cancel() { } func (d *DownloadClient) Get() (string, error) { + var f *os.File + // If we already have the file and it matches, then just return the target path. if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify { log.Println("Initial checksum matched, no download needed.") @@ -131,7 +133,7 @@ func (d *DownloadClient) Get() (string, error) { } // Otherwise, download using the downloader. - f, err := os.Create(finalPath) + f, err = os.OpenFile(finalPath, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) if err != nil { return "", err } @@ -195,9 +197,9 @@ func (*HTTPDownloader) Cancel() { // TODO(mitchellh): Implement } -func (d *HTTPDownloader) Download(dst io.Writer, src *url.URL) error { +func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { log.Printf("Starting download: %s", src.String()) - req, err := http.NewRequest("GET", src.String(), nil) + req, err := http.NewRequest("HEAD", src.String(), nil) if err != nil { return err } @@ -217,10 +219,15 @@ func (d *HTTPDownloader) Download(dst io.Writer, src *url.URL) error { return err } + req.Method = "GET" if resp.StatusCode != 200 { log.Printf( "Non-200 status code: %d. Getting error body.", resp.StatusCode) + resp, err := httpClient.Do(req) + if err != nil { + return err + } errorBody := new(bytes.Buffer) io.Copy(errorBody, resp.Body) return fmt.Errorf("HTTP error '%d'! 
Remote side responded:\n%s", @@ -228,6 +235,21 @@ func (d *HTTPDownloader) Download(dst io.Writer, src *url.URL) error { } d.progress = 0 + + if resp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := dst.Stat(); err == nil { + if _, err = dst.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + d.progress = uint(fi.Size()) + } + } + } + + resp, err = httpClient.Do(req) + if err != nil { + return err + } + d.total = uint(resp.ContentLength) var buffer [4096]byte From c3e79c62b9a460c0af0dcc2d9ea7ddadfe76c7c5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 10:01:32 -0500 Subject: [PATCH 400/956] document force_deregister --- website/source/docs/builders/amazon-chroot.html.markdown | 3 +++ website/source/docs/builders/amazon-ebs.html.markdown | 3 +++ website/source/docs/builders/amazon-instance.html.markdown | 3 +++ 3 files changed, 9 insertions(+) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index e4e8cefa4..637153dab 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -124,6 +124,9 @@ each category, the available configuration keys are alphabetized. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +* `force_deregister` (boolean) – Force Packer to first deregister an existing +AMI if one with the same name already exists. Default `false`. + * `mount_path` (string) - The path where the volume will be mounted. This is where the chroot environment will be. This defaults to `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 0ff9522df..55747aa3c 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -96,6 +96,9 @@ each category, the available configuration keys are alphabetized. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +* `force_deregister` (boolean) – Force Packer to first deregister an existing +AMI if one with the same name already exists. Default `false`. + * `iam_instance_profile` (string) - The name of an [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) to launch the EC2 instance with. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index ae5fbff27..73b0caa5d 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -136,6 +136,9 @@ each category, the available configuration keys are alphabetized. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +* `force_deregister` (boolean) – Force Packer to first deregister an existing +AMI if one with the same name already exists. Default `false`. 
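As a minimal sketch of how this new option is used (the AMI name below is an illustrative placeholder, and the fragment omits the other required builder keys; `force_deregister` is the only setting being demonstrated):

```
{
  "type": "amazon-instance",
  "ami_name": "my-rebuilt-ami",
  "force_deregister": true
}
```

With this set, repeated builds can reuse a fixed `ami_name` without failing on the name collision.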
+ * `iam_instance_profile` (string) - The name of an [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) to launch the EC2 instance with. From e814349511c9a94ad47d2d2c3c74e4b46beb3e19 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 15 Jun 2015 10:04:11 -0500 Subject: [PATCH 401/956] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a11f23d5..c7a38adca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,8 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] + * builder/amazon: Add `force_deregister` option for automatic AMI + deregistration [GH-1873] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] From 07f9eaf5b382b16f011227b8e63be2552c2598b9 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 15 Jun 2015 10:04:46 -0500 Subject: [PATCH 402/956] Update CHANGELOG.md update GH issue for the `force_deregister` option --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7a38adca..d20273f0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,7 +29,7 @@ IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] * builder/amazon: Add `force_deregister` option for automatic AMI - deregistration [GH-1873] + deregistration [GH-2221] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] From fc62afaafb4573f0da5514878662d9df2d82dda6 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 15 Jun 2015 10:07:42 -0500 Subject: [PATCH 403/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d20273f0e..a7988ea2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] + * builder/amazon: Now applies tags to EBS snapshots [GH-2212] * builder/digitalocean: Save SSH key to pwd if debug mode is on. 
[GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] From d8cc24f86ec902a4e498566b852aebe2eaea8b2e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:34:35 -0700 Subject: [PATCH 404/956] builder/openstack: no more port --- builder/openstack/ssh.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 7ec1fcfdd..3e7350d11 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -22,7 +22,7 @@ func CommHost( // If we have a specific interface, try that if sshinterface != "" { - if addr := sshAddrFromPool(s, sshinterface, port); addr != "" { + if addr := sshAddrFromPool(s, sshinterface); addr != "" { return addr, nil } } @@ -38,7 +38,7 @@ func CommHost( } // Try to get it from the requested interface - if addr := sshAddrFromPool(s, sshinterface, port); addr != "" { + if addr := sshAddrFromPool(s, sshinterface); addr != "" { return addr, nil } @@ -75,7 +75,7 @@ func SSHConfig(username string) func(multistep.StateBag) (*ssh.ClientConfig, err } } -func sshAddrFromPool(s *servers.Server, desired string, port int) string { +func sshAddrFromPool(s *servers.Server, desired string) string { // Get all the addresses associated with this server. This // was taken directly from Terraform. for pool, networkAddresses := range s.Addresses { @@ -106,7 +106,7 @@ func sshAddrFromPool(s *servers.Server, desired string, port int) string { } } if addr != "" { - return fmt.Sprintf("%s:%d", addr, port) + return addr } } } From 62727c14a02c8a90e059539467c8c11f675cd636 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:35:22 -0700 Subject: [PATCH 405/956] update CHANGELOG --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7988ea2a..5374ab4f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,7 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] - * builder/amazon: Add `force_deregister` option for automatic AMI + * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] @@ -80,6 +80,8 @@ BUG FIXES: * builder/docker: Fix crash that could occur at certain timed ctrl-c [GH-1838] * builder/docker: validate that `export_path` is not a directory [GH-2105] * builder/google: `ssh_timeout` is respected [GH-1781] + * builder/openstack: `ssh_interface` can be used to specify the interface + to retrieve the SSH IP from. [GH-2220] * builder/qemu: Add `disk_discard` option [GH-2120] * builder/virtualbox: Bind HTTP server to IPv4, which is more compatible with OS installers. [GH-1709] From 1691971e085779c62bd2563de0b1a7cf6f5057be Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:37:01 -0700 Subject: [PATCH 406/956] update CHANGELOG --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5374ab4f6..667352fe2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,9 @@ IMPROVEMENTS: have prohibitive firewalls * builder/openstack: Flavor names can be used as well as refs * builder/openstack: Add `availability_zone` [GH-2016] + * builder/openstack: Machine will be stopped prior to imaging if the + cluster supports the `startstop` extension. 
[GH-2223] + * builder/openstack: Support for user data [GH-2224] * builder/virtualbox: Added option: `ssh_skip_nat_mapping` to skip the automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support @@ -53,6 +56,7 @@ IMPROVEMENTS: BUG FIXES: * core: Fix potential panic for post-processor plugin exits [GH-2098] + * core: `PACKER_CONFIG` may point to a non-existent file [GH-2226] * builder/amazon: Allow spaces in AMI names when using `clean_ami_name` [GH-2182] * builder/amazon: Remove deprecated ec2-upload-bundle paramger [GH-1931] * builder/amazon: Use IAM Profile to upload bundle if provided [GH-1985] From 3ea324d3f6814a3cb090fb9f80301301e662dcd9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:38:14 -0700 Subject: [PATCH 407/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 667352fe2..dc1a0815c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ IMPROVEMENTS: automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support * builder/vmware: Support for additional disks [GH-1382] + * command/fix: After fixing, the template is validated [GH-2228] * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] * post-processor/docker-save: Can be chained [GH-2179] @@ -109,6 +110,7 @@ BUG FIXES: * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] + * provisioner/shell: single quotes in env vars are escaped [GH-2229] ## 0.7.5 (December 9, 2014) From ac367c2b19b6023d1264e383a389d375136f6513 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:39:21 -0700 Subject: [PATCH 408/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc1a0815c..4fcf55149 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,6 +101,8 @@ BUG FIXES: * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * builder/vmware: Nested output directories for ESXi work [GH-2174] + * command/fix: For the `virtualbox` to `virtualbox-iso` builder rename, + provisioner overrides are now also fixed [GH-2231] * command/validate: don't crash for invalid builds [GH-2139] * post-processor/atlas: Find common archive prefix for Windows [GH-1874] * post-processor/atlas: Fix index out of range panic [GH-1959] From a978bbf781ec999a93f94431f3d1d7f0a99babf0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:40:11 -0700 Subject: [PATCH 409/956] website: update docs for new functions --- .../source/docs/templates/configuration-templates.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index 514bf7820..cef1385d3 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -55,6 +55,8 @@ While some configuration settings have local variables specific to only that configuration, a set of functions are available globally for 
use in _any string_ in Packer templates. These are listed below for reference. +* `build_name` - The name of the build being run. +* `build_type` - The type of the builder being used currently. * `isotime [FORMAT]` - UTC time, which can be [formatted](http://golang.org/pkg/time/#example_Time_Format). See more examples below. * `lower` - Lowercases the string. From 23c7e715e26ec22324763bc16ca23eeebbd72fa8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:41:15 -0700 Subject: [PATCH 410/956] update CHANGELOG --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fcf55149..fd5cb7159 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,8 @@ BUG FIXES: floppy disk. [GH-1879] * builder/virtualbox: Fixed regression where downloading ISO without a ".iso" extension didn't work. [GH-1839] + * builder/virtualbox: Output dir is verified at runtime, not template + validation time. [GH-2233] * builder/vmware: Add 100ms delay between keystrokes to avoid subtle timing issues in most cases. [GH-1663] * builder/vmware: Bind HTTP server to IPv4, which is more compatible with @@ -101,6 +103,8 @@ BUG FIXES: * builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989] * builder/vmware: More robust IP parsing from ifconfig output [GH-1999] * builder/vmware: Nested output directories for ESXi work [GH-2174] + * builder/vmware: Output dir is verified at runtime, not template + validation time. [GH-2233] * command/fix: For the `virtualbox` to `virtualbox-iso` builder rename, provisioner overrides are now also fixed [GH-2231] * command/validate: don't crash for invalid builds [GH-2139] From 0787060c3b64789652be01c81efa6e2589c6591c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:41:51 -0700 Subject: [PATCH 411/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd5cb7159..3b1d6935e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ BUG FIXES: * builder/openstack: `ssh_interface` can be used to specify the interface to retrieve the SSH IP from. [GH-2220] * builder/qemu: Add `disk_discard` option [GH-2120] + * builder/qemu: Use proper SSH port, not hardcoded to 22. [GH-2236] * builder/virtualbox: Bind HTTP server to IPv4, which is more compatible with OS installers. [GH-1709] * builder/virtualbox: Remove the floppy controller in addition to the From 8b52fa5226ef0ab37c1e7cd6e7fae5a289e23202 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:43:14 -0700 Subject: [PATCH 412/956] update CHANGELOG --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b1d6935e..1e80fb53f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,10 @@ FEATURES: connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled to allow access to remote servers such as private git repos. [GH-1066] + * **New config function: `build_name`**: The name of the currently running + build. [GH-2232] + * **New config function: `build_type`**: The type of the currently running + builder. This is useful for provisioners. [GH-2232] * **New config function: `template_dir`**: The directory to the template being built. This should be used for template-relative paths. 
[GH-54] From 661cd0243efa2e201662538abd368e15776e419f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:44:44 -0700 Subject: [PATCH 413/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e80fb53f..ed5b0d111 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] + * builder/*: Add `ssh_handshake_attempts` to configure the number of + handshake attempts done before failure [GH-2237] * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] From 70fdb36524d2213b7f451e12b42dc1b2cc049434 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 09:49:41 -0700 Subject: [PATCH 414/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed5b0d111..ac36c6fe5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ FEATURES: connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled to allow access to remote servers such as private git repos. [GH-1066] + * **Docker builder supports SSH**: The Docker builder now supports containers + with SSH, just set `communicator` to "ssh" [GH-2244] * **New config function: `build_name`**: The name of the currently running build. [GH-2232] * **New config function: `build_type`**: The type of the currently running From d015d20a2440f8bd92ff881c064439a462b64447 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 11:53:03 -0500 Subject: [PATCH 415/956] document block device mapping fields --- .../docs/builders/amazon-ebs.html.markdown | 29 +++++++++++++++---- .../builders/amazon-instance.html.markdown | 27 +++++++++++++---- 2 files changed, 46 insertions(+), 10 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index af3ece59e..a7153a950 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -61,11 +61,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" - (integer). + device mappings to the AMI. The block device mappings allow for keys: + + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. 
Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -255,3 +270,7 @@ Here is an example using the optional AMI tags. This will add the tags } } ``` + + +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 3ca82731b..2c7f6e4a7 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -81,12 +81,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" (integer). - See [amazon-ebs](/docs/builders/amazon-ebs.html) for an example template. + device mappings to the AMI. The block device mappings allow for keys: + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -318,3 +332,6 @@ sudo -i -n ec2-upload-bundle \ The available template variables should be self-explanatory based on the parameters they're used to satisfy the `ec2-upload-bundle` command. 
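To tie the block device mapping keys documented above together, here is a hedged sketch of a single `ami_block_device_mappings` entry (the device name and size are placeholder values; only keys from the list above are used):

```
"ami_block_device_mappings": [
  {
    "device_name": "/dev/sdh",
    "volume_type": "gp2",
    "volume_size": 40,
    "delete_on_termination": true
  }
]
```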
+ +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html From d22c4173d3457684fd2fd1bc290cafd62f43c09a Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 11:53:21 -0500 Subject: [PATCH 416/956] fix crash when waiting for an instance that has failed --- builder/amazon/common/step_run_source_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index ec330ebc4..f88db5efc 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -245,7 +245,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } latestInstance, err := WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", *s.instance.InstanceID, err) + err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt From 523a3342b8155ead8a23aeed69344a8eb838298f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 10:15:08 -0700 Subject: [PATCH 417/956] builder/qemu: fix tests --- builder/qemu/step_run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 816a3d3d3..21472f179 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -81,7 +81,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error defaultArgs["-name"] = vmName defaultArgs["-machine"] = fmt.Sprintf("type=%s", config.MachineType) defaultArgs["-netdev"] = fmt.Sprintf( - "user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.SSHPort) + "user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.Comm.Port()) defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) if !config.DiskImage { From 2d13db300c493589c63cc5732b0eeaa52ede6a9a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 10:26:46 -0700 Subject: [PATCH 418/956] packer: HookProvision errors if no communicator --- common/step_provision.go | 6 ++++-- packer/build_test.go | 2 +- packer/builder_mock.go | 2 +- packer/provisioner.go | 12 ++++++++++++ packer/provisioner_test.go | 24 +++++++++++++++++++++--- 5 files changed, 39 insertions(+), 7 deletions(-) diff --git a/common/step_provision.go b/common/step_provision.go index ae06f1b0c..f40cfd896 100644 --- a/common/step_provision.go +++ b/common/step_provision.go @@ -23,9 +23,11 @@ type StepProvision struct { func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { comm := s.Comm if comm == nil { - comm = state.Get("communicator").(packer.Communicator) + raw, ok := state.Get("communicator").(packer.Communicator) + if ok { + comm = raw.(packer.Communicator) + } } - hook := state.Get("hook").(packer.Hook) ui := state.Get("ui").(packer.Ui) diff --git a/packer/build_test.go b/packer/build_test.go index b183fb95a..e29318972 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -202,7 +202,7 @@ func TestBuild_Run(t *testing.T) { } // Verify provisioners run - dispatchHook.Run(HookProvision, nil, nil, 42) + 
dispatchHook.Run(HookProvision, nil, new(MockCommunicator), 42) prov := build.provisioners[0].provisioner.(*MockProvisioner) if !prov.ProvCalled { t.Fatal("should be called") diff --git a/packer/builder_mock.go b/packer/builder_mock.go index 9cb016963..d8fd98e13 100644 --- a/packer/builder_mock.go +++ b/packer/builder_mock.go @@ -43,7 +43,7 @@ func (tb *MockBuilder) Run(ui Ui, h Hook, c Cache) (Artifact, error) { } if h != nil { - if err := h.Run(HookProvision, ui, nil, nil); err != nil { + if err := h.Run(HookProvision, ui, new(MockCommunicator), nil); err != nil { return nil, err } } diff --git a/packer/provisioner.go b/packer/provisioner.go index d28d1371a..f4f3fce11 100644 --- a/packer/provisioner.go +++ b/packer/provisioner.go @@ -38,6 +38,18 @@ type ProvisionHook struct { // Runs the provisioners in order. func (h *ProvisionHook) Run(name string, ui Ui, comm Communicator, data interface{}) error { + // Shortcut + if len(h.Provisioners) == 0 { + return nil + } + + if comm == nil { + return fmt.Errorf( + "No communicator found for provisioners! This is usually because the\n" + + "`communicator` config was set to \"none\". If you have any provisioners\n" + + "then a communicator is required. Please fix this to continue.") + } + defer func() { h.lock.Lock() defer h.lock.Unlock() diff --git a/packer/provisioner_test.go b/packer/provisioner_test.go index 5eeebb4a3..7251d6f05 100644 --- a/packer/provisioner_test.go +++ b/packer/provisioner_test.go @@ -19,7 +19,7 @@ func TestProvisionHook(t *testing.T) { pB := &MockProvisioner{} ui := testUi() - var comm Communicator = nil + var comm Communicator = new(MockCommunicator) var data interface{} = nil hook := &ProvisionHook{ @@ -37,6 +37,24 @@ func TestProvisionHook(t *testing.T) { } } +func TestProvisionHook_nilComm(t *testing.T) { + pA := &MockProvisioner{} + pB := &MockProvisioner{} + + ui := testUi() + var comm Communicator = nil + var data interface{} = nil + + hook := &ProvisionHook{ + Provisioners: []Provisioner{pA, pB}, + } + + err := hook.Run("foo", ui, comm, data) + if err == nil { + t.Fatal("should error") + } +} + func TestProvisionHook_cancel(t *testing.T) { var lock sync.Mutex order := make([]string, 0, 2) @@ -59,7 +77,7 @@ func TestProvisionHook_cancel(t *testing.T) { finished := make(chan struct{}) go func() { - hook.Run("foo", nil, nil, nil) + hook.Run("foo", nil, new(MockCommunicator), nil) close(finished) }() @@ -74,7 +92,7 @@ func TestProvisionHook_cancel(t *testing.T) { <-finished // Verify order - if order[0] != "cancel" || order[1] != "prov" { + if len(order) != 2 || order[0] != "cancel" || order[1] != "prov" { t.Fatalf("bad: %#v", order) } } From d393bb5112241c75ecd8da878399ba62038d0d75 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 15 Jun 2015 10:30:45 -0700 Subject: [PATCH 419/956] make updatedeps will actually update now --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0574cbb5c..9abc16995 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: - go get -d -v -p 2 ./... + go get -u -d -v -p 2 ./... vet: @go tool vet 2>/dev/null ; if [ $$? 
-eq 3 ]; then \ From 667c53942b9d7a547928002c417e1948ca0dfe60 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Mon, 15 Jun 2015 12:40:34 -0700 Subject: [PATCH 420/956] use template for additional disks --- builder/vmware/iso/builder.go | 29 ++++++++++++++------------- builder/vmware/iso/step_create_vmx.go | 26 ++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 16 deletions(-) mode change 100644 => 100755 builder/vmware/iso/builder.go mode change 100644 => 100755 builder/vmware/iso/step_create_vmx.go diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go old mode 100644 new mode 100755 index 38ba3a4a1..fa8deb983 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -36,20 +36,21 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` - AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` - DiskName string `mapstructure:"vmdk_name"` - DiskSize uint `mapstructure:"disk_size"` - DiskTypeId string `mapstructure:"disk_type_id"` - FloppyFiles []string `mapstructure:"floppy_files"` - GuestOSType string `mapstructure:"guest_os_type"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` - Version string `mapstructure:"version"` - VMName string `mapstructure:"vm_name"` - BootCommand []string `mapstructure:"boot_command"` - SkipCompaction bool `mapstructure:"skip_compaction"` - VMXTemplatePath string `mapstructure:"vmx_template_path"` + AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskName string `mapstructure:"vmdk_name"` + DiskSize uint `mapstructure:"disk_size"` + DiskTypeId string `mapstructure:"disk_type_id"` + FloppyFiles []string `mapstructure:"floppy_files"` + GuestOSType string `mapstructure:"guest_os_type"` + ISOChecksum string `mapstructure:"iso_checksum"` + ISOChecksumType string `mapstructure:"iso_checksum_type"` + ISOUrls []string `mapstructure:"iso_urls"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` + BootCommand []string `mapstructure:"boot_command"` + SkipCompaction bool `mapstructure:"skip_compaction"` + VMXTemplatePath string `mapstructure:"vmx_template_path"` + VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` RemoteType string `mapstructure:"remote_type"` RemoteDatastore string `mapstructure:"remote_datastore"` diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go old mode 100644 new mode 100755 index 69cb3f261..272721893 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -76,7 +76,29 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { DiskName: config.DiskName, } - diskTemplate, err := interpolate.Render(DefaultAdditionalDiskTemplate, &ctx) + diskTemplate := DefaultAdditionalDiskTemplate + if config.VMXDiskTemplatePath != "" { + f, err := os.Open(config.VMXDiskTemplatePath) + if err != nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + defer f.Close() + + rawBytes, err := ioutil.ReadAll(f) + if err != nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + diskTemplate = string(rawBytes) + } + + diskContents, err := interpolate.Render(diskTemplate, &ctx) if err != nil { err 
:= fmt.Errorf("Error preparing VMX template for additional disk: %s", err) state.Put("error", err) @@ -84,7 +106,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - vmxTemplate += diskTemplate + vmxTemplate += diskContents } } From 106c9403ed3d21ed16e03551b6a7783f42538180 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:37:20 -0700 Subject: [PATCH 421/956] provisioner/chef-client: chmod the directories --- provisioner/chef-client/provisioner.go | 21 +++++++++++++------ .../provisioners/chef-client.html.markdown | 9 ++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index b28c9e83a..527b375c1 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -310,16 +310,25 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri mkdirCmd = "sudo " + mkdirCmd } - cmd := &packer.RemoteCmd{ - Command: mkdirCmd, - } - + cmd := &packer.RemoteCmd{Command: mkdirCmd} if err := cmd.StartWithUi(comm, ui); err != nil { return err } - if cmd.ExitStatus != 0 { - return fmt.Errorf("Non-zero exit status.") + return fmt.Errorf("Non-zero exit status. See output above for more info.") + } + + // Chmod the directory to 0777 just so that we can access it as our user + mkdirCmd = fmt.Sprintf("chmod 0777 '%s'", dir) + if !p.config.PreventSudo { + mkdirCmd = "sudo " + mkdirCmd + } + cmd = &packer.RemoteCmd{Command: mkdirCmd} + if err := cmd.StartWithUi(comm, ui); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status. See output above for more info.") } return nil diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index eaeadbf45..22e965149 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -161,3 +161,12 @@ curl -L https://www.opscode.com/chef/install.sh | \ ``` This command can be customized using the `install_command` configuration. + +## Folder Permissions + +The `chef-client` provisioner will chmod the directory with your Chef +keys to 777. This is to ensure that Packer can upload and make use of that +directory. However, once the machine is created, you usually don't +want to keep these directories with those permissions. To change the +permissions on the directories, append a shell provisioner after Chef +to modify them. From 906c45266d96e1cdbbb03853c8c5c43b48e93800 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:46:43 -0700 Subject: [PATCH 422/956] website: make warning for chef perms --- website/source/docs/provisioners/chef-client.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 22e965149..9a2a11379 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -164,7 +164,7 @@ This command can be customized using the `install_command` configuration. ## Folder Permissions -The `chef-client` provisioner will chmod the directory with your Chef +!> The `chef-client` provisioner will chmod the directory with your Chef keys to 777. 
This is to ensure that Packer can upload and make use of that directory. However, once the machine is created, you usually don't want to keep these directories with those permissions. To change the From 6f7818980ce2ec4dfa575295dae54a9643e67213 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:50:01 -0700 Subject: [PATCH 423/956] Update updatedeps --- Makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9abc16995..884d6bbf2 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,13 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: - go get -u -d -v -p 2 ./... + go get -u github.com/mitchellh/gox + go get -u golang.org/x/tools/cmd/stringer + go list ./... \ + | xargs go list -f '{{join .Deps "\n"}}' \ + | grep -v github.com/mitchellh/packer \ + | sort -u \ + | xargs go get -f -u -v vet: @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ From 86539398ab22a042fe6ad531b9ea883bc475ef60 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:52:06 -0700 Subject: [PATCH 424/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ac36c6fe5..11566cfea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -121,6 +121,7 @@ BUG FIXES: * post-processor/atlas: Fix index out of range panic [GH-1959] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] + * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] From 91e565d54f08c98cc2d4859f7f828f8d8a1f6b4b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 16:02:26 -0500 Subject: [PATCH 425/956] builder/amazon: Update docs on ssh_private_key --- website/source/docs/builders/amazon-ebs.html.markdown | 3 ++- website/source/docs/builders/amazon-instance.html.markdown | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index a7153a950..0f7a46186 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -152,7 +152,8 @@ AMI if one with the same name already exists. Default `false`. to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 2c7f6e4a7..326706e69 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -191,7 +191,8 @@ AMI if one with the same name already exists. Default `false`. to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. 
This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. From 14787fd4cc5332107c5757523cf2229fe9fa8465 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:29:12 -0700 Subject: [PATCH 426/956] provisioner/chef-client: run cleanup on node [GH-1295] --- provisioner/chef-client/provisioner.go | 42 +++++++++++++++++--------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 527b375c1..20b594562 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -9,7 +9,6 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "strings" @@ -336,15 +335,9 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef node...") - app := fmt.Sprintf("knife node delete %s -y", node) - - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() - - ui.Message(fmt.Sprintf("%s", out)) - - if err != nil { - return err + args := []string{"node", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup node: %s", err) } return nil @@ -352,16 +345,35 @@ func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node str func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef client...") - app := fmt.Sprintf("knife client delete %s -y", node) + args := []string{"client", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup client: %s", err) + } - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() + return nil +} - ui.Message(fmt.Sprintf("%s", out)) +func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, args []string) error { + flags := []string{ + "-y", + "-s", fmt.Sprintf("'%s'", p.config.ServerUrl), + "-k", fmt.Sprintf("'%s'", p.config.ClientKey), + "-u", fmt.Sprintf("'%s'", node), + } - if err != nil { + cmdText := fmt.Sprintf( + "knife %s %s", strings.Join(args, " "), strings.Join(flags, " ")) + if !p.config.PreventSudo { + cmdText = "sudo " + cmdText + } + + cmd := &packer.RemoteCmd{Command: cmdText} + if err := cmd.StartWithUi(comm, ui); err != nil { return err } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status. See output above for more info.") + } return nil } From 711dfc9d0ad5b8b143290a4e71b5f7a5fa5af8c9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:35:54 -0700 Subject: [PATCH 427/956] provisioner/chef: show command in output --- provisioner/chef-client/provisioner.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 20b594562..2d42d361d 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -372,7 +372,10 @@ func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node str return err } if cmd.ExitStatus != 0 { - return fmt.Errorf("Non-zero exit status. See output above for more info.") + return fmt.Errorf( + "Non-zero exit status. 
See output above for more info.\n\n"+ + "Command: %s", + cmdText) } return nil From 753ad76e2bfe011d3199632bdc4e6b2fb52b5d66 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:37:00 -0700 Subject: [PATCH 428/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11566cfea..96fd4c2f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ BUG FIXES: * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] + * provisioner/chef-client: Node cleanup works now. [GH-2257] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] From 452421b8bc7892c5865eb3f93719287bb62fb3bd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:40:53 -0700 Subject: [PATCH 429/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 96fd4c2f9..7732e566d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ IMPROVEMENTS: automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support * builder/vmware: Support for additional disks [GH-1382] + * builder/vmware: Can now customize the template used for adding disks [GH-2254] * command/fix: After fixing, the template is validated [GH-2228] * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] From 742e5568363eecef3c00f602611ce6ad2e9c10a6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:44:54 -0700 Subject: [PATCH 430/956] provisioner/puppet-masterless: only base if manifest is a file [GH-1933] --- provisioner/puppet-masterless/provisioner.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index eb364da58..a92a22094 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -259,7 +259,13 @@ func (p *Provisioner) uploadManifests(ui packer.Ui, comm packer.Communicator) (s } defer f.Close() - manifestFilename := filepath.Base(p.config.ManifestFile) + manifestFilename := p.config.ManifestFile + if fi, err := os.Stat(p.config.ManifestFile); err != nil { + return "", fmt.Errorf("Error inspecting manifest file: %s", err) + } else if !fi.IsDir() { + manifestFilename = filepath.Base(manifestFilename) + } + remoteManifestFile := fmt.Sprintf("%s/%s", remoteManifestsPath, manifestFilename) if err := comm.Upload(remoteManifestFile, f, nil); err != nil { return "", err From a235419c7d41c5a63e2c132cab6533f1cfc0b72f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:02:59 -0700 Subject: [PATCH 431/956] provisioner/shell: remove file after exec [GH-1536] --- provisioner/shell/provisioner.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index baedd645a..28c1a2e06 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -266,12 +266,24 @@ func (p *Provisioner) Provision(ui packer.Ui, comm 
packer.Communicator) error { return err } - // Close the original file since we copied it - f.Close() - if cmd.ExitStatus != 0 { return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } + + // Delete the temporary file we created + cmd = &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -f %s", p.config.RemotePath), + } + if err := comm.Start(cmd); err != nil { + return fmt.Errorf( + "Error removing temporary script at %s: %s", + p.config.RemotePath, err) + } + cmd.Wait() + if cmd.ExitStatus != 0 { + return fmt.Errorf( + "Error removing temporary script at %s!") + } } return nil From d98de209cb8b38b8d93c30f93f946c59b93ea24d Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 16 Jun 2015 01:04:48 +0300 Subject: [PATCH 432/956] fallback to not ranged request if server lacks HEAD Signed-off-by: Vasiliy Tolstov --- common/download.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/common/download.go b/common/download.go index d6711b6b4..782712e94 100644 --- a/common/download.go +++ b/common/download.go @@ -215,18 +215,23 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } resp, err := httpClient.Do(req) - if err != nil { - return err + if err != nil || resp.StatusCode != 200 { + req.Method = "GET" + resp, err = httpClient.Do(req) + if err != nil { + return err + } } - req.Method = "GET" if resp.StatusCode != 200 { log.Printf( "Non-200 status code: %d. Getting error body.", resp.StatusCode) - - resp, err := httpClient.Do(req) - if err != nil { - return err + if req.Method != "GET" { + req.Method = "GET" + resp, err = httpClient.Do(req) + if err != nil { + return err + } } errorBody := new(bytes.Buffer) io.Copy(errorBody, resp.Body) @@ -234,6 +239,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { resp.StatusCode, errorBody.String()) } + req.Method = "GET" d.progress = 0 if resp.Header.Get("Accept-Ranges") == "bytes" { From 8ecca2aa54dbee4b0ccd36280493b3d944ec6136 Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:24:31 +0300 Subject: [PATCH 433/956] implement ssh.Download() --- communicator/ssh/communicator.go | 55 ++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 8fd9ba91e..46dd22e2d 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -14,6 +14,7 @@ import ( "net" "os" "path/filepath" + "strconv" "sync" ) @@ -171,8 +172,58 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error { return c.scpSession("scp -rvt "+dst, scpFunc) } -func (c *comm) Download(string, io.Writer) error { - panic("not implemented yet") +func (c *comm) Download(path string, output io.Writer) error { + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + fmt.Fprint(w, "\x00") + + // read file info + fi, err := stdoutR.ReadString( '\n') + if err != nil { + return err + } + + if len(fi) < 0 { + return fmt.Errorf("empty response from server") + } + + switch fi[0] { + case '\x01', '\x02': + return fmt.Errorf("%s", fi[1:len(fi)]) + case 'C': + case 'D': + return fmt.Errorf("remote file is directory") + default: + return fmt.Errorf("unexpected server response (%x)", fi[0]) + } + + var mode string + var size int64 + + n, err := fmt.Sscanf(fi, "%6s %d ", &mode, &size) + if err != nil || n != 2 { + return fmt.Errorf("can't parse server response (%s)", fi) + } + if size < 0 { + return fmt.Errorf("negative file size") 
+ } + + fmt.Fprint(w, "\x00") + + if _, err := io.CopyN(output, stdoutR, size); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + + if err := checkSCPStatus(stdoutR); err != nil { + return err + } + + return nil + } + + return c.scpSession("scp -vf "+strconv.Quote(path), scpFunc) } func (c *comm) newSession() (session *ssh.Session, err error) { From 500d83b673ca29f2189c9ab9c1470ddb1d13a9ef Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:25:27 +0300 Subject: [PATCH 434/956] add download support to file provisioner --- provisioner/file/provisioner.go | 43 +++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index ce359a407..9bc2a646c 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -20,6 +20,9 @@ type Config struct { // The remote path where the local file will be uploaded to. Destination string + // Direction + Direction string + ctx interpolate.Context } @@ -38,12 +41,28 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return err } + if p.config.Direction == "" { + p.config.Direction = "upload" + } + var errs *packer.MultiError if _, err := os.Stat(p.config.Source); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) } + if p.config.Direction != "download" && p.config.Direction != "upload" { + errs = packer.MultiErrorAppend(errs, + errors.New("Direction must be one of: download, upload.")) + } + + if p.config.Direction == "upload" { + if _, err := os.Stat(p.config.Source); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) + } + } + if p.config.Destination == "" { errs = packer.MultiErrorAppend(errs, errors.New("Destination must be specified.")) @@ -57,6 +76,30 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + if p.config.Direction == "download" { + return p.ProvisionDownload(ui, comm) + } else { + return p.ProvisionUpload(ui, comm) + } +} + +func (p *Provisioner) ProvisionDownload(ui packer.Ui, comm packer.Communicator) error { + ui.Say(fmt.Sprintf("Downloading %s => %s", p.config.Source, p.config.Destination)) + + f, err := os.OpenFile(p.config.Destination, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + + err = comm.Download(p.config.Source, f) + if err != nil { + ui.Error(fmt.Sprintf("Download failed: %s", err)) + } + return err +} + +func (p *Provisioner) ProvisionUpload(ui packer.Ui, comm packer.Communicator) error { ui.Say(fmt.Sprintf("Uploading %s => %s", p.config.Source, p.config.Destination)) info, err := os.Stat(p.config.Source) if err != nil { From 15f40a3d004749425c2f40a9be007468970dec1e Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:48:53 +0300 Subject: [PATCH 435/956] fix disabling vmware tools for ESX --- builder/vmware/common/step_upload_tools.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/vmware/common/step_upload_tools.go b/builder/vmware/common/step_upload_tools.go index aa7dd08e7..3f7214965 100644 --- a/builder/vmware/common/step_upload_tools.go +++ b/builder/vmware/common/step_upload_tools.go @@ -23,6 +23,10 @@ type StepUploadTools struct { func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) + if 
c.ToolsUploadFlavor == "" { + return multistep.ActionContinue + } + if c.RemoteType == "esx5" { if err := driver.ToolsInstall(); err != nil { state.Put("error", fmt.Errorf("Couldn't mount VMware tools ISO. Please check the 'guest_os_type' in your template.json.")) @@ -30,10 +34,6 @@ func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionContinue } - if c.ToolsUploadFlavor == "" { - return multistep.ActionContinue - } - comm := state.Get("communicator").(packer.Communicator) tools_source := state.Get("tools_upload_source").(string) ui := state.Get("ui").(packer.Ui) From 686d4413ecb7fb9c2e1ccc2a6c08061624d2111a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:07:20 -0700 Subject: [PATCH 436/956] communicator/winrm: error if download --- communicator/ssh/communicator.go | 3 +-- communicator/winrm/communicator.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 46dd22e2d..2cc299b30 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -173,12 +173,11 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error { } func (c *comm) Download(path string, output io.Writer) error { - scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { fmt.Fprint(w, "\x00") // read file info - fi, err := stdoutR.ReadString( '\n') + fi, err := stdoutR.ReadString('\n') if err != nil { return err } diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 82686e2a7..2b53ac62c 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -113,7 +113,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error } func (c *Communicator) Download(src string, dst io.Writer) error { - panic("download not implemented") + return fmt.Errorf("WinRM doesn't support download.") } func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { From aee48239f74433cd842e0a4a64557915dc95f1dc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:08:04 -0700 Subject: [PATCH 437/956] website: document file download --- website/source/docs/provisioners/file.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 68034fe00..19fcce9be 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -40,6 +40,10 @@ The available configuration options are listed below. All elements are required. machine. This value must be a writable location and any parent directories must already exist. +* `direction` (string) - The direction of the file transfer. This defaults + to "upload." If it is set to "download" then the file "source" in + the machine wll be downloaded locally to "destination" + ## Directory Uploads The file provisioner is also able to upload a complete directory to the From 13346ba648adb797206750ee8a147467c56d8b82 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:10:15 -0700 Subject: [PATCH 438/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7732e566d..340fc9872 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,8 @@ FEATURES: to allow access to remote servers such as private git repos. 
[GH-1066] * **Docker builder supports SSH**: The Docker builder now supports containers with SSH, just set `communicator` to "ssh" [GH-2244] + * **File provisioner can download**: The file provisioner can now download + files out of the build process. [GH-1909] * **New config function: `build_name`**: The name of the currently running build. [GH-2232] * **New config function: `build_type`**: The type of the currently running From 6c802286614a1127de545056a270199eef5f1aaf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:11:08 -0700 Subject: [PATCH 439/956] provisioner/shell: missing error arg --- provisioner/shell/provisioner.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 28c1a2e06..338092755 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -282,7 +282,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { cmd.Wait() if cmd.ExitStatus != 0 { return fmt.Errorf( - "Error removing temporary script at %s!") + "Error removing temporary script at %s!", + p.config.RemotePath) } } From 8dfd553e86674c2c082dc29dd3b13dcfc7d175b6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:16:17 -0700 Subject: [PATCH 440/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 340fc9872..8744e4a1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,7 @@ BUG FIXES: * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] * provisioner/shell: single quotes in env vars are escaped [GH-2229] + * provisioner/shell: Temporary file is deleted after run [GH-2259] ## 0.7.5 (December 9, 2014) From b15a77660a1c0d25f3368e6d5ada6cc05e1efd99 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:18:21 -0700 Subject: [PATCH 441/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8744e4a1d..628d56d94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ IMPROVEMENTS: * post-processor/docker-save: Can be chained [GH-2179] * post-processor/docker-tag: Support `force` option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] + * provisioner/puppet-masterless: `working_directory` option [GH-1831] BUG FIXES: From 2bb4bdffc4c1ef012fb279ab163b9221c8318b28 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:23:38 -0700 Subject: [PATCH 442/956] website: update docs for ssh_keypair_name --- .../docs/builders/amazon-ebs.html.markdown | 27 ++++++++++------- .../builders/amazon-instance.html.markdown | 29 +++++++++++-------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 0f7a46186..6c7840575 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -61,24 +61,24 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: + device mappings to the AMI. 
The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on [Block Device Mapping][1] for more information - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - `volume_size` (integer) – The size of the volume, in GiB. Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is deleted on instance termination - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the - block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information @@ -148,11 +148,16 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must + a generated ssh key pair for connecting to the instance. This key file must already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 326706e69..ff9e7c9a2 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -81,26 +81,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. 
See the documentation on [Block Device Mapping][1] for more information - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - `volume_size` (integer) – The size of the volume, in GiB. Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is deleted on instance termination - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the - block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information - + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -187,11 +187,16 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must + a generated ssh key pair for connecting to the instance. This key file must already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private From 3bae1d24b3193e81a265700284645de7945b79ab Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:24:03 -0700 Subject: [PATCH 443/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 628d56d94..f57206328 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ IMPROVEMENTS: * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] + * builder/amazon: Support custom keypairs [GH-1837] * builder/digitalocean: Save SSH key to pwd if debug mode is on. 
[GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] From 4258a4102933ecd3acfafd1389e129ebc4fb3d03 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:30:00 -0700 Subject: [PATCH 444/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f57206328..38718e928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,8 @@ IMPROVEMENTS: * post-processor/docker-tag: Support `force` option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] * provisioner/puppet-masterless: `working_directory` option [GH-1831] + * provisioner/puppet-masterless: `packer_build_name` and + `packer_build_type` are default facts. [GH-1878] BUG FIXES: From 29f02d243f2c16be24be9d3f6d1c13d9343aa7ac Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 15 Jun 2015 18:56:09 -0700 Subject: [PATCH 445/956] Had io.Copy args swapped; also use os.Create instead of os.OpenFile for MAGIC --- builder/file/builder.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/file/builder.go b/builder/file/builder.go index ea3206dad..9297b456d 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -57,14 +57,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - target, err := os.OpenFile(b.config.Target, os.O_WRONLY, 0600) + // Create will truncate an existing file + target, err := os.Create(b.config.Target) defer target.Close() if err != nil { return nil, err } ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name())) - bytes, err := io.Copy(source, target) + bytes, err := io.Copy(target, source) if err != nil { return nil, err } From 97e94eda7795b15d90b4c136302d6239c772eb53 Mon Sep 17 00:00:00 2001 From: Kerim Satirli Date: Tue, 16 Jun 2015 09:18:59 +0200 Subject: [PATCH 446/956] adds missing comma The `parallels_tools_flavor` key-value pair is missing a trailing comma, thereby making it invalid JSON. --- website/source/docs/builders/parallels-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index ed7ebd86c..b84123f8b 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -32,7 +32,7 @@ Ubuntu to self-install. 
Still, the example serves to show the basic configuratio "iso_url": "http://releases.ubuntu.com/12.04/ubuntu-12.04.3-server-amd64.iso", "iso_checksum": "2cbe868812a871242cdcdd8f2fd6feb9", "iso_checksum_type": "md5", - "parallels_tools_flavor": "lin" + "parallels_tools_flavor": "lin", "ssh_username": "packer", "ssh_password": "packer", "ssh_wait_timeout": "30s", From 8990c38d5e53f4148fbee669a5ca1703d701abe2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 16 Jun 2015 09:38:24 -0700 Subject: [PATCH 447/956] provisioner/puppet-masterless: deprecation warning --- provisioner/puppet-masterless/provisioner.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index a92a22094..643a555d0 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -264,6 +264,8 @@ func (p *Provisioner) uploadManifests(ui packer.Ui, comm packer.Communicator) (s return "", fmt.Errorf("Error inspecting manifest file: %s", err) } else if !fi.IsDir() { manifestFilename = filepath.Base(manifestFilename) + } else { + ui.Say("WARNING: manifest_file should be a file. Use manifest_dir for directories") } remoteManifestFile := fmt.Sprintf("%s/%s", remoteManifestsPath, manifestFilename) From fe0c548619b2288c3a196369ed0b27ff21eb27e0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 11:30:49 -0700 Subject: [PATCH 448/956] Added acceptance test for file builder --- builder/file/builder.go | 16 +---- builder/file/builder_test.go | 67 +++++++++++++++++++ builder/file/config_test.go | 4 +- builder/file/test-fixtures/artifact.txt | 1 + .../compress/post-processor_test.go | 21 +++++- 5 files changed, 90 insertions(+), 19 deletions(-) create mode 100644 builder/file/test-fixtures/artifact.txt diff --git a/builder/file/builder.go b/builder/file/builder.go index 9297b456d..9a2c2cc7f 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -15,27 +15,13 @@ import ( "github.com/mitchellh/packer/packer" ) -const BuilderId = "cbednarski.file" +const BuilderId = "packer.file" type Builder struct { config *Config runner multistep.Runner } -// Prepare is responsible for configuring the builder and validating -// that configuration. Any setup should be done in this method. Note that -// NO side effects should take place in prepare, it is meant as a state -// setup only. Calling Prepare is not necessarilly followed by a Run. -// -// The parameters to Prepare are a set of interface{} values of the -// configuration. These are almost always `map[string]interface{}` -// parsed from a template, but no guarantee is made. -// -// Each of the configuration values should merge into the final -// configuration. -// -// Prepare should return a list of warnings along with any errors -// that occured while preparing. func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { c, warnings, errs := NewConfig(raws...) 
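	// (NewConfig separates non-fatal warnings from hard errors; Prepare
	// returns both so callers can still report warnings when validation
	// fails.)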
if errs != nil { diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go index 63d36a0a5..3ce9e77ae 100644 --- a/builder/file/builder_test.go +++ b/builder/file/builder_test.go @@ -1,11 +1,78 @@ package file import ( + "fmt" + "io/ioutil" "testing" + builderT "github.com/mitchellh/packer/helper/builder/testing" "github.com/mitchellh/packer/packer" ) func TestBuilder_implBuilder(t *testing.T) { var _ packer.Builder = new(Builder) } + +func TestBuilderFileAcc_content(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileContentTest, + Check: checkContent, + }) +} + +func TestBuilderFileAcc_copy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileCopyTest, + Check: checkCopy, + }) +} + +func checkContent(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("contentTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "hello world!" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +func checkCopy(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("copyTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "Hello world.\n" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +const fileContentTest = ` +{ + "builders": [ + { + "type":"test", + "target":"contentTest.txt", + "content":"hello world!" + } + ] +} +` + +const fileCopyTest = ` +{ + "builders": [ + { + "type":"test", + "target":"copyTest.txt", + "source":"test-fixtures/artifact.txt" + } + ] +} +` diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 6d8039558..9d8f346fc 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -1,7 +1,6 @@ package file import ( - "fmt" "strings" "testing" ) @@ -39,8 +38,7 @@ func TestNoContent(t *testing.T) { delete(raw, "content") delete(raw, "source") _, warns, _ := NewConfig(raw) - fmt.Println(len(warns)) - fmt.Printf("%#v\n", warns) + if len(warns) == 0 { t.Error("Expected config warning without any content") } diff --git a/builder/file/test-fixtures/artifact.txt b/builder/file/test-fixtures/artifact.txt new file mode 100644 index 000000000..18249f335 --- /dev/null +++ b/builder/file/test-fixtures/artifact.txt @@ -0,0 +1 @@ +Hello world. 
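A note on the acceptance tests added above: the test helper skips them unless
its gating environment variable is set, so they never slow down a plain
`go test` run. Assuming the helper's TestEnvVar constant is "PACKER_ACC",
an invocation would look like:

    PACKER_ACC=1 go test ./builder/file/ -v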
diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 92cbfc4b3..5f4d6b9ca 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,3 +1,22 @@ package compress -import () +// import ( +// "testing" +// +// builderT "github.com/mitchellh/packer/helper/builder/testing" +// ) +// +// func TestBuilderTagsAcc_basic(t *testing.T) { +// builderT.Test(t, builderT.TestCase{ +// Builder: &Builder{}, +// Template: simpleTestCase, +// Check: checkTags(), +// }) +// } + +const simpleTestCase = ` +{ + "type": "compress", + "output": "foo.tar.gz" +} +` From aea70d5a720b9cc415bd57f46707a0cb4e4193dd Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 11:31:53 -0700 Subject: [PATCH 449/956] Added acceptance test for file builder --- builder/file/builder.go | 16 +----- builder/file/builder_test.go | 67 +++++++++++++++++++++++++ builder/file/config_test.go | 4 +- builder/file/test-fixtures/artifact.txt | 1 + 4 files changed, 70 insertions(+), 18 deletions(-) create mode 100644 builder/file/test-fixtures/artifact.txt diff --git a/builder/file/builder.go b/builder/file/builder.go index 9297b456d..9a2c2cc7f 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -15,27 +15,13 @@ import ( "github.com/mitchellh/packer/packer" ) -const BuilderId = "cbednarski.file" +const BuilderId = "packer.file" type Builder struct { config *Config runner multistep.Runner } -// Prepare is responsible for configuring the builder and validating -// that configuration. Any setup should be done in this method. Note that -// NO side effects should take place in prepare, it is meant as a state -// setup only. Calling Prepare is not necessarilly followed by a Run. -// -// The parameters to Prepare are a set of interface{} values of the -// configuration. These are almost always `map[string]interface{}` -// parsed from a template, but no guarantee is made. -// -// Each of the configuration values should merge into the final -// configuration. -// -// Prepare should return a list of warnings along with any errors -// that occured while preparing. func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { c, warnings, errs := NewConfig(raws...) if errs != nil { diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go index 63d36a0a5..3ce9e77ae 100644 --- a/builder/file/builder_test.go +++ b/builder/file/builder_test.go @@ -1,11 +1,78 @@ package file import ( + "fmt" + "io/ioutil" "testing" + builderT "github.com/mitchellh/packer/helper/builder/testing" "github.com/mitchellh/packer/packer" ) func TestBuilder_implBuilder(t *testing.T) { var _ packer.Builder = new(Builder) } + +func TestBuilderFileAcc_content(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileContentTest, + Check: checkContent, + }) +} + +func TestBuilderFileAcc_copy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileCopyTest, + Check: checkCopy, + }) +} + +func checkContent(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("contentTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "hello world!" 
{ + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +func checkCopy(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("copyTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "Hello world.\n" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +const fileContentTest = ` +{ + "builders": [ + { + "type":"test", + "target":"contentTest.txt", + "content":"hello world!" + } + ] +} +` + +const fileCopyTest = ` +{ + "builders": [ + { + "type":"test", + "target":"copyTest.txt", + "source":"test-fixtures/artifact.txt" + } + ] +} +` diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 6d8039558..9d8f346fc 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -1,7 +1,6 @@ package file import ( - "fmt" "strings" "testing" ) @@ -39,8 +38,7 @@ func TestNoContent(t *testing.T) { delete(raw, "content") delete(raw, "source") _, warns, _ := NewConfig(raw) - fmt.Println(len(warns)) - fmt.Printf("%#v\n", warns) + if len(warns) == 0 { t.Error("Expected config warning without any content") } diff --git a/builder/file/test-fixtures/artifact.txt b/builder/file/test-fixtures/artifact.txt new file mode 100644 index 000000000..18249f335 --- /dev/null +++ b/builder/file/test-fixtures/artifact.txt @@ -0,0 +1 @@ +Hello world. From 12cf6650a075feab94e5f6695734c2ab4700d7b0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 12:10:28 -0700 Subject: [PATCH 450/956] Revert compress post-processor to master to get baseline test --- post-processor/compress/artifact.go | 30 +- post-processor/compress/post-processor.go | 405 +++------------------- 2 files changed, 61 insertions(+), 374 deletions(-) diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index 054d501d1..34a7ce8d6 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -8,31 +8,37 @@ import ( const BuilderId = "packer.post-processor.compress" type Artifact struct { - builderId string - dir string - f []string + Path string + Provider string } -func (a *Artifact) BuilderId() string { +func NewArtifact(provider, path string) *Artifact { + return &Artifact{ + Path: path, + Provider: provider, + } +} + +func (*Artifact) BuilderId() string { return BuilderId } -func (a *Artifact) Files() []string { - return a.f +func (self *Artifact) Id() string { + return "" } -func (*Artifact) Id() string { - return "COMPRESS" +func (self *Artifact) Files() []string { + return []string{self.Path} } -func (a *Artifact) String() string { - return fmt.Sprintf("VM compressed files in directory: %s", a.dir) +func (self *Artifact) String() string { + return fmt.Sprintf("'%s' compressing: %s", self.Provider, self.Path) } func (*Artifact) State(name string) interface{} { return nil } -func (a *Artifact) Destroy() error { - return os.RemoveAll(a.dir) +func (self *Artifact) Destroy() error { + return os.Remove(self.Path) } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 6d28e7c0e..ccf300946 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -2,416 +2,97 @@ package compress import ( "archive/tar" - "archive/zip" - "compress/flate" "compress/gzip" "fmt" "io" "os" - "path/filepath" - "runtime" - "strings" - "time" - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" 
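	// (this revert drops the third-party compressors bgzf, pgzip and lz4,
	// which later commits in this series bring back)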
"github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" - "github.com/pierrec/lz4" - "gopkg.in/yaml.v2" ) -type Metadata map[string]Metaitem - -type Metaitem struct { - CompSize int64 `yaml:"compsize"` - OrigSize int64 `yaml:"origsize"` - CompType string `yaml:"comptype"` - CompDate string `yaml:"compdate"` -} - type Config struct { common.PackerConfig `mapstructure:",squash"` - OutputPath string `mapstructure:"output"` - OutputFile string `mapstructure:"file"` - Compression int `mapstructure:"compression"` - Metadata bool `mapstructure:"metadata"` - NumCPU int `mapstructure:"numcpu"` - Format string `mapstructure:"format"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - ctx *interpolate.Context + OutputPath string `mapstructure:"output"` + + ctx interpolate.Context } type PostProcessor struct { - cfg Config + config Config } -func (p *PostProcessor) Configure(raws ...interface{}) error { - p.cfg.Compression = -1 - err := config.Decode(&p.cfg, &config.DecodeOpts{ +func (self *PostProcessor) Configure(raws ...interface{}) error { + err := config.Decode(&self.config, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - // TODO figure out if something needs to go here. - }, + Exclude: []string{}, }, }, raws...) - - errs := new(packer.MultiError) - - if p.cfg.OutputPath == "" { - p.cfg.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" - } - - if err = interpolate.Validate(p.cfg.OutputPath, p.cfg.ctx); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing target template: %s", err)) - } - - templates := map[string]*string{ - "output": &p.cfg.OutputPath, - } - - if p.cfg.Compression > flate.BestCompression { - p.cfg.Compression = flate.BestCompression - } - if p.cfg.Compression == -1 { - p.cfg.Compression = flate.DefaultCompression - } - - if p.cfg.NumCPU < 1 { - p.cfg.NumCPU = runtime.NumCPU() - } - - runtime.GOMAXPROCS(p.cfg.NumCPU) - - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = interpolate.Render(p.cfg.OutputPath, p.cfg.ctx) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs + if err != nil { + return err } return nil } -func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { - // layout shows by example how the reference time should be represented. 
- const layout = "2006-01-02_15-04-05" - t := time.Now() +func (self *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + ui.Say(fmt.Sprintf("Creating archive for '%s'", artifact.BuilderId())) - if !p.cfg.Metadata { - return metadata - } - for _, f := range files { - if fi, err := os.Stat(f); err != nil { - continue - } else { - if i, ok := metadata[filepath.Base(f)]; !ok { - metadata[filepath.Base(f)] = Metaitem{CompType: p.cfg.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} - } else { - i.CompSize = fi.Size() - i.CompDate = t.Format(layout) - metadata[filepath.Base(f)] = i - } - } - } - return metadata -} - -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - newartifact := &Artifact{builderId: artifact.BuilderId(), dir: p.cfg.OutputPath} - metafile := filepath.Join(p.cfg.OutputPath, "metadata") - - _, err := os.Stat(newartifact.dir) - if err == nil { - return nil, false, fmt.Errorf("output dir must not exists: %s", err) - } - err = os.MkdirAll(newartifact.dir, 0755) + // Create the compressed archive file at the appropriate OutputPath. + fw, err := os.Create(self.config.OutputPath) if err != nil { - return nil, false, fmt.Errorf("failed to create output: %s", err) - } - - formats := strings.Split(p.cfg.Format, ".") - files := artifact.Files() - - metadata := make(Metadata, 0) - metadata = p.fillMetadata(metadata, files) - - for _, compress := range formats { - switch compress { - case "tar": - files, err = p.cmpTAR(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "zip": - files, err = p.cmpZIP(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "pgzip": - files, err = p.cmpPGZIP(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "gzip": - files, err = p.cmpGZIP(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "bgzf": - files, err = p.cmpBGZF(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "lz4": - files, err = p.cmpLZ4(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "e2fs": - files, err = p.cmpE2FS(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - } - if err != nil { - return nil, false, fmt.Errorf("Failed to compress: %s", err) - } - } - - if p.cfg.Metadata { - fp, err := os.Create(metafile) - if err != nil { - return nil, false, err - } - if buf, err := yaml.Marshal(metadata); err != nil { - fp.Close() - return nil, false, err - } else { - if _, err = fp.Write(buf); err != nil { - fp.Close() - return nil, false, err - } - fp.Close() - } - } - - newartifact.f = append(newartifact.f, files...) - if p.cfg.Metadata { - newartifact.f = append(newartifact.f, metafile) - } - - return newartifact, p.cfg.KeepInputArtifact, nil -} - -func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) - if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed creating file for compressed archive: %s", self.config.OutputPath) } defer fw.Close() - tw := tar.NewWriter(fw) - defer tw.Close() + gw := gzip.NewWriter(fw) + defer gw.Close() - for _, name := range src { - fi, err := os.Stat(name) + // Iterate through all of the artifact's files and put them into the + // compressed archive using the tar/gzip writers. 
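	// (Note: as restored, a fresh tar.Writer is created for every file in
	// the loop below, and each deferred Close writes a tar end-of-archive
	// trailer; creating one writer before the loop with a single deferred
	// Close would be the more conventional shape.)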
+ for _, path := range artifact.Files() { + fi, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed stating file: %s", path) } - target, _ := os.Readlink(name) + target, _ := os.Readlink(path) header, err := tar.FileInfoHeader(fi, target) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed creating archive header: %s", path) } - if err = tw.WriteHeader(header); err != nil { - return nil, fmt.Errorf("tar error: %s", err) + tw := tar.NewWriter(gw) + defer tw.Close() + + // Write the header first to the archive. This takes partial data + // from the FileInfo that is grabbed by running the stat command. + if err := tw.WriteHeader(header); err != nil { + return nil, false, fmt.Errorf( + "Failed writing archive header: %s", path) } - fr, err := os.Open(name) + // Open the target file for archiving and compressing. + fr, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed opening file '%s' to write compressed archive.", path) } + defer fr.Close() if _, err = io.Copy(tw, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed copying file to archive: %s", path) } - fr.Close() } - return []string{dst}, nil -} - -func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("gzip error: %s", err) - } - cw, err := gzip.NewWriterLevel(fw, p.cfg.Compression) - if err != nil { - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw, err := pgzip.NewWriterLevel(fw, p.cfg.Compression) - if err != nil { - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw := lz4.NewWriter(fw) - if err != nil { - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - if p.cfg.Compression > flate.DefaultCompression { - cw.Header.HighCompression = true - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - 
if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - - cw, err := bgzf.NewWriterLevel(fw, p.cfg.Compression, runtime.NumCPU()) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { - panic("not implemented") -} - -func (p *PostProcessor) cmpZIP(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - defer fw.Close() - - zw := zip.NewWriter(fw) - defer zw.Close() - - for _, name := range src { - header, err := zw.Create(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - fr, err := os.Open(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - if _, err = io.Copy(header, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("zip error: %s", err) - } - fr.Close() - } - return []string{dst}, nil - + + return NewArtifact(artifact.BuilderId(), self.config.OutputPath), false, nil } From fe105107d25d909d07344c2a58dd524983b652d6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 12:11:11 -0700 Subject: [PATCH 451/956] Removed extra files -- will re-add later --- post-processor/compress/LICENSE | 21 --- post-processor/compress/benchmark.go | 197 --------------------------- 2 files changed, 218 deletions(-) delete mode 100644 post-processor/compress/LICENSE delete mode 100644 post-processor/compress/benchmark.go diff --git a/post-processor/compress/LICENSE b/post-processor/compress/LICENSE deleted file mode 100644 index 38bbf26f3..000000000 --- a/post-processor/compress/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Vasiliy Tolstov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go deleted file mode 100644 index ed4d68168..000000000 --- a/post-processor/compress/benchmark.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build ignore - -package main - -import ( - "compress/flate" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "testing" - - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" - "github.com/pierrec/lz4" -) - -type Compressor struct { - r *os.File - w *os.File - sr int64 - sw int64 -} - -func (c *Compressor) Close() error { - var err error - - fi, _ := c.w.Stat() - c.sw = fi.Size() - if err = c.w.Close(); err != nil { - return err - } - - fi, _ = c.r.Stat() - c.sr = fi.Size() - if err = c.r.Close(); err != nil { - return err - } - - return nil -} - -func NewCompressor(src, dst string) (*Compressor, error) { - r, err := os.Open(src) - if err != nil { - return nil, err - } - - w, err := os.Create(dst) - if err != nil { - r.Close() - return nil, err - } - - c := &Compressor{r: r, w: w} - return c, nil -} - -func main() { - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var resw testing.BenchmarkResult - var resr testing.BenchmarkResult - - c, err := NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkGZIPReader) - c.Close() - fmt.Printf("gzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkBGZFWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkBGZFReader) - c.Close() - fmt.Printf("bgzf:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkPGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkPGZIPReader) - c.Close() - fmt.Printf("pgzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkLZ4Writer) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkLZ4Reader) - c.Close() - fmt.Printf("lz4:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - -} - -func (c *Compressor) BenchmarkGZIPWriter(b *testing.B) { - cw, _ := gzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkGZIPReader(b *testing.B) { - cr, _ := gzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkBGZFWriter(b *testing.B) { - cw, _ := bgzf.NewWriterLevel(c.w, flate.BestSpeed, runtime.NumCPU()) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - c.w.Sync() -} - -func (c *Compressor) BenchmarkBGZFReader(b *testing.B) { - cr, _ := bgzf.NewReader(c.w, 0) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - 
b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) { - cw, _ := pgzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkPGZIPReader(b *testing.B) { - cr, _ := pgzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { - cw := lz4.NewWriter(c.w) - // cw.Header.HighCompression = true - cw.Header.NoChecksum = true - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { - cr := lz4.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} From ddbc145d29f8e312057cb1221e49ff70172ff77c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 16:31:09 -0700 Subject: [PATCH 452/956] Implemented acceptance test for compress --- .../compress/post-processor_test.go | 103 +++++++++++++++--- 1 file changed, 88 insertions(+), 15 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 5f4d6b9ca..12faeabed 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,22 +1,95 @@ package compress -// import ( -// "testing" -// -// builderT "github.com/mitchellh/packer/helper/builder/testing" -// ) -// -// func TestBuilderTagsAcc_basic(t *testing.T) { -// builderT.Test(t, builderT.TestCase{ -// Builder: &Builder{}, -// Template: simpleTestCase, -// Check: checkTags(), -// }) -// } +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/mitchellh/packer/builder/file" + env "github.com/mitchellh/packer/helper/builder/testing" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" +) + +func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { + // Create fake UI and Cache + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + // Create config for file builder + const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` + tpl, err := template.Parse(strings.NewReader(fileConfig)) + if err != nil { + return nil, nil, fmt.Errorf("Unable to parse setup configuration: %s", err) + } + + // Prepare the file builder + builder := file.Builder{} + warnings, err := builder.Prepare(tpl.Builders["file"].Config) + if len(warnings) > 0 { + for _, warn := range warnings { + return nil, nil, fmt.Errorf("Configuration warning: %s", warn) + } + } + if err != nil { + return nil, nil, fmt.Errorf("Invalid configuration: %s", err) + } + + // Run the file builder + artifact, err := builder.Run(ui, nil, cache) + if err != nil { + return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) + } + + return ui, artifact, err +} + +func TestSimpleCompress(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(simpleTestCase)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + 
compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to compress artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + fi, err := os.Stat("package.tar.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } + if fi.IsDir() { + t.Error("Archive should not be a directory") + } +} const simpleTestCase = ` { - "type": "compress", - "output": "foo.tar.gz" + "post-processors": [ + { + "type": "compress", + "output": "package.tar.gz" + } + ] } ` From af4d8b99b41a4d5b9329ba75f61444a8fcda71bf Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Wed, 17 Jun 2015 12:29:10 +1200 Subject: [PATCH 453/956] Add quickfix to restart provisioner as existing one was not working. For more information read https://github.com/mitchellh/packer/pull/2243 --- provisioner/windows-restart/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 234980183..e88aa0b3b 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -12,7 +12,7 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) -var DefaultRestartCommand = "shutdown /r /c \"packer restart\" /t 5 && net stop winrm" +var DefaultRestartCommand = "powershell \"& {Restart-Computer -force }\"" var DefaultRestartCheckCommand = winrm.Powershell(`echo "${env:COMPUTERNAME} restarted."`) var retryableSleep = 5 * time.Second From 2d6f8279e6a807f7b65c14cb99b08e038aac18c8 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 19:08:22 -0700 Subject: [PATCH 454/956] Restore configuration structure from vtolstov's branch --- post-processor/compress/post-processor.go | 34 +++++++++++++++++++---- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index c2f608685..b08465a66 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -2,23 +2,47 @@ package compress import ( "archive/tar" + "archive/zip" + "compress/flate" "compress/gzip" "fmt" "io" "os" + "path/filepath" + "runtime" + "strings" + "time" + "gopkg.in/yaml.v2" + + "github.com/biogo/hts/bgzf" + "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" + "github.com/pierrec/lz4" ) +type Metadata map[string]Metaitem + +type Metaitem struct { + CompSize int64 `yaml:"compsize"` + OrigSize int64 `yaml:"origsize"` + CompType string `yaml:"comptype"` + CompDate string `yaml:"compdate"` +} + type Config struct { common.PackerConfig `mapstructure:",squash"` - - OutputPath string `mapstructure:"output"` - - ctx interpolate.Context + OutputPath string `mapstructure:"output"` + OutputFile string `mapstructure:"file"` + Compression int `mapstructure:"compression"` + Metadata bool `mapstructure:"metadata"` + NumCPU int `mapstructure:"numcpu"` + Format string `mapstructure:"format"` + KeepInputArtifact bool `mapstructure:"keep_input_artifact"` + ctx *interpolate.Context } type PostProcessor struct { @@ -205,7 +229,7 @@ func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { return nil, fmt.Errorf("tar error on stat of %s: %s", name, err) } - target, _ := 
os.Readlink(path) + target, _ := os.Readlink(name) header, err := tar.FileInfoHeader(fi, target) if err != nil { return nil, fmt.Errorf("tar error reading info for %s: %s", name, err) From 47bb5ae89908716683f11fe402440656f6582b5d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 20:23:40 -0700 Subject: [PATCH 455/956] Re-added benchmark and license --- post-processor/compress/LICENSE | 21 +++ post-processor/compress/benchmark.go | 197 +++++++++++++++++++++++++++ 2 files changed, 218 insertions(+) create mode 100644 post-processor/compress/LICENSE create mode 100644 post-processor/compress/benchmark.go diff --git a/post-processor/compress/LICENSE b/post-processor/compress/LICENSE new file mode 100644 index 000000000..38bbf26f3 --- /dev/null +++ b/post-processor/compress/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Vasiliy Tolstov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
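A note on the benchmark program re-added below: its "// +build ignore"
constraint keeps it out of ordinary `go build` and `go test` runs. It is a
standalone package main that expects a sample image at /tmp/image.r, so it
is meant to be executed directly, e.g. with `go run benchmark.go`.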
diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go new file mode 100644 index 000000000..ed4d68168 --- /dev/null +++ b/post-processor/compress/benchmark.go @@ -0,0 +1,197 @@ +// +build ignore + +package main + +import ( + "compress/flate" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/biogo/hts/bgzf" + "github.com/klauspost/pgzip" + "github.com/pierrec/lz4" +) + +type Compressor struct { + r *os.File + w *os.File + sr int64 + sw int64 +} + +func (c *Compressor) Close() error { + var err error + + fi, _ := c.w.Stat() + c.sw = fi.Size() + if err = c.w.Close(); err != nil { + return err + } + + fi, _ = c.r.Stat() + c.sr = fi.Size() + if err = c.r.Close(); err != nil { + return err + } + + return nil +} + +func NewCompressor(src, dst string) (*Compressor, error) { + r, err := os.Open(src) + if err != nil { + return nil, err + } + + w, err := os.Create(dst) + if err != nil { + r.Close() + return nil, err + } + + c := &Compressor{r: r, w: w} + return c, nil +} + +func main() { + + runtime.GOMAXPROCS(runtime.NumCPU()) + + var resw testing.BenchmarkResult + var resr testing.BenchmarkResult + + c, err := NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkGZIPWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkGZIPReader) + c.Close() + fmt.Printf("gzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkBGZFWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkBGZFReader) + c.Close() + fmt.Printf("bgzf:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkPGZIPWriter) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkPGZIPReader) + c.Close() + fmt.Printf("pgzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + + c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") + if err != nil { + panic(err) + } + resw = testing.Benchmark(c.BenchmarkLZ4Writer) + c.w.Seek(0, 0) + resr = testing.Benchmark(c.BenchmarkLZ4Reader) + c.Close() + fmt.Printf("lz4:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) + +} + +func (c *Compressor) BenchmarkGZIPWriter(b *testing.B) { + cw, _ := gzip.NewWriterLevel(c.w, flate.BestSpeed) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c *Compressor) BenchmarkGZIPReader(b *testing.B) { + cr, _ := gzip.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkBGZFWriter(b *testing.B) { + cw, _ := bgzf.NewWriterLevel(c.w, flate.BestSpeed, runtime.NumCPU()) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + c.w.Sync() +} + +func (c *Compressor) BenchmarkBGZFReader(b *testing.B) { + cr, _ := bgzf.NewReader(c.w, 0) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) { + cw, _ := pgzip.NewWriterLevel(c.w, flate.BestSpeed) + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c 
*Compressor) BenchmarkPGZIPReader(b *testing.B) { + cr, _ := pgzip.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} + +func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { + cw := lz4.NewWriter(c.w) + // cw.Header.HighCompression = true + cw.Header.NoChecksum = true + b.ResetTimer() + + _, err := io.Copy(cw, c.r) + if err != nil { + b.Fatal(err) + } + cw.Close() + c.w.Sync() +} + +func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { + cr := lz4.NewReader(c.w) + b.ResetTimer() + + _, err := io.Copy(ioutil.Discard, cr) + if err != nil { + b.Fatal(err) + } +} From a53cc8b07045599c2d20c022629c040b6f859ee3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 17:53:40 +0200 Subject: [PATCH 456/956] update CHANGELOG --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38718e928..a495a3b4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ FEATURES: connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled to allow access to remote servers such as private git repos. [GH-1066] + * **OpenStack v3 Identity:** The OpenStack builder now supports the + v3 identity API. * **Docker builder supports SSH**: The Docker builder now supports containers with SSH, just set `communicator` to "ssh" [GH-2244] * **File provisioner can download**: The file provisioner can now download @@ -32,6 +34,12 @@ FEATURES: builder. This is useful for provisioners. [GH-2232] * **New config function: `template_dir`**: The directory to the template being built. This should be used for template-relative paths. [GH-54] + * **New provisioner: powershell**: Provision Windows machines + with PowerShell scripts. [GH-2243] + * **New provisioner: windows-shell**: Provision Windows machines with + batch files. [GH-2243] + * **New provisioner: windows-restart**: Restart a Windows machine and + wait for it to come back online.
[GH-2243] IMPROVEMENTS: From 7711e07f053ccccd491f811c9cf33334a0895120 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 17:56:31 +0200 Subject: [PATCH 457/956] provisioner/windows-restart: test fix --- provisioner/windows-restart/provisioner_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go index d2a54d274..bbb89e116 100644 --- a/provisioner/windows-restart/provisioner_test.go +++ b/provisioner/windows-restart/provisioner_test.go @@ -34,7 +34,7 @@ func TestProvisionerPrepare_Defaults(t *testing.T) { t.Errorf("unexpected remote path: %s", p.config.RestartTimeout) } - if p.config.RestartCommand != "shutdown /r /c \"packer restart\" /t 5 && net stop winrm" { + if p.config.RestartCommand != "powershell \"& {Restart-Computer -force }\"" { t.Errorf("unexpected remote path: %s", p.config.RestartCommand) } } From b2609db395fb4d7520f6d391319c2c77bbd07101 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 18:22:27 +0200 Subject: [PATCH 458/956] provisioner/windows-restart: fix potential panic case --- provisioner/windows-restart/provisioner.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index e88aa0b3b..7c1c5ada9 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -3,6 +3,7 @@ package restart import ( "fmt" "log" + "sync" "time" "github.com/masterzen/winrm/winrm" @@ -33,10 +34,11 @@ type Config struct { } type Provisioner struct { - config Config - comm packer.Communicator - ui packer.Ui - cancel chan struct{} + config Config + comm packer.Communicator + ui packer.Ui + cancel chan struct{} + cancelLock sync.Mutex } func (p *Provisioner) Prepare(raws ...interface{}) error { @@ -68,10 +70,13 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + p.cancelLock.Lock() + p.cancel = make(chan struct{}) + p.cancelLock.Unlock() + ui.Say("Restarting Machine") p.comm = comm p.ui = ui - p.cancel = make(chan struct{}) var cmd *packer.RemoteCmd command := p.config.RestartCommand @@ -164,7 +169,12 @@ var waitForCommunicator = func(p *Provisioner) error { func (p *Provisioner) Cancel() { log.Printf("Received interrupt Cancel()") - close(p.cancel) + + p.cancelLock.Lock() + defer p.cancelLock.Unlock() + if p.cancel != nil { + close(p.cancel) + } } // retryable will retry the given function over and over until a From cbaaf0da52cd0860e4efe43eaa0d675240ee6362 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:10:42 +0200 Subject: [PATCH 459/956] communicator/ssh: support for bastion SSH --- communicator/ssh/connect.go | 43 ++++++++++++++++++ helper/communicator/config.go | 18 ++++++++ helper/communicator/step_connect_ssh.go | 58 +++++++++++++++++++++++-- 3 files changed, 116 insertions(+), 3 deletions(-) diff --git a/communicator/ssh/connect.go b/communicator/ssh/connect.go index b280f3ead..43277595c 100644 --- a/communicator/ssh/connect.go +++ b/communicator/ssh/connect.go @@ -1,8 +1,11 @@ package ssh import ( + "fmt" "net" "time" + + "golang.org/x/crypto/ssh" ) // ConnectFunc is a convenience method for returning a function @@ -23,3 +26,43 @@ func ConnectFunc(network, addr string) func() (net.Conn, error) { return c, nil } } + +// BastionConnectFunc is a 
convenience method for returning a function +// that connects to a host over a bastion connection. +func BastionConnectFunc( + bProto string, + bAddr string, + bConf *ssh.ClientConfig, + proto string, + addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + // Connect to the bastion + bastion, err := ssh.Dial(bProto, bAddr, bConf) + if err != nil { + return nil, fmt.Errorf("Error connecting to bastion: %s", err) + } + + // Connect through to the end host + conn, err := bastion.Dial(proto, addr) + if err != nil { + bastion.Close() + return nil, err + } + + // Wrap it up so we close both things properly + return &bastionConn{ + Conn: conn, + Bastion: bastion, + }, nil + } +} + +type bastionConn struct { + net.Conn + Bastion *ssh.Client +} + +func (c *bastionConn) Close() error { + c.Conn.Close() + return c.Bastion.Close() +} diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 4c316ff69..4d10bbbb6 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -23,6 +23,11 @@ type Config struct { SSHPty bool `mapstructure:"ssh_pty"` SSHTimeout time.Duration `mapstructure:"ssh_timeout"` SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"` + SSHBastionHost string `mapstructure:"ssh_bastion_host"` + SSHBastionPort int `mapstructure:"ssh_bastion_port"` + SSHBastionUsername string `mapstructure:"ssh_bastion_username"` + SSHBastionPassword string `mapstructure:"ssh_bastion_password"` + SSHBastionPrivateKey string `mapstructure:"ssh_bastion_private_key_file"` // WinRM WinRMUser string `mapstructure:"winrm_username"` @@ -77,6 +82,12 @@ func (c *Config) prepareSSH(ctx *interpolate.Context) []error { c.SSHHandshakeAttempts = 10 } + if c.SSHBastionHost != "" { + if c.SSHBastionPort == 0 { + c.SSHBastionPort = 22 + } + } + // Validation var errs []error if c.SSHUsername == "" { @@ -93,6 +104,13 @@ } } + if c.SSHBastionHost != "" { + if c.SSHBastionPassword == "" && c.SSHBastionPrivateKey == "" { + errs = append(errs, errors.New( + "ssh_bastion_password or ssh_bastion_private_key_file must be specified")) + } + } + return errs } diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index 4b664fe4c..fd6b585f8 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -4,10 +4,12 @@ import ( "errors" "fmt" "log" + "net" "strings" "time" "github.com/mitchellh/multistep" + commonssh "github.com/mitchellh/packer/common/ssh" "github.com/mitchellh/packer/communicator/ssh" "github.com/mitchellh/packer/packer" gossh "golang.org/x/crypto/ssh" @@ -79,6 +81,24 @@ func (s *StepConnectSSH) Cleanup(multistep.StateBag) { } func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan struct{}) (packer.Communicator, error) { + // Determine if we're using a bastion host, and if so, retrieve + // that configuration. This configuration doesn't change so we + // do this once before entering the retry loop.
+ var bProto, bAddr string + var bConf *gossh.ClientConfig + if s.Config.SSHBastionHost != "" { + // The protocol is hardcoded for now, but may be configurable one day + bProto = "tcp" + bAddr = fmt.Sprintf( + "%s:%d", s.Config.SSHBastionHost, s.Config.SSHBastionPort) + + conf, err := sshBastionConfig(s.Config) + if err != nil { + return nil, fmt.Errorf("Error configuring bastion: %s", err) + } + bConf = conf + } + handshakeAttempts := 0 var comm packer.Communicator @@ -117,10 +137,18 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru continue } - address := fmt.Sprintf("%s:%d", host, port) - // Attempt to connect to SSH port - connFunc := ssh.ConnectFunc("tcp", address) + var connFunc func() (net.Conn, error) + address := fmt.Sprintf("%s:%d", host, port) + if bAddr != "" { + // We're using a bastion host, so use the bastion connfunc + connFunc = ssh.BastionConnectFunc( + bProto, bAddr, bConf, "tcp", address) + } else { + // No bastion host, connect directly + connFunc = ssh.ConnectFunc("tcp", address) + } + nc, err := connFunc() if err != nil { log.Printf("[DEBUG] TCP connection to SSH ip/port failed: %s", err) @@ -164,3 +192,27 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru return comm, nil } + +func sshBastionConfig(config *Config) (*gossh.ClientConfig, error) { + auth := make([]gossh.AuthMethod, 0, 2) + if config.SSHBastionPassword != "" { + auth = append(auth, + gossh.Password(config.SSHBastionPassword), + gossh.KeyboardInteractive( + ssh.PasswordKeyboardInteractive(config.SSHBastionPassword))) + } + + if config.SSHBastionPrivateKey != "" { + signer, err := commonssh.FileSigner(config.SSHBastionPrivateKey) + if err != nil { + return nil, err + } + + auth = append(auth, gossh.PublicKeys(signer)) + } + + return &gossh.ClientConfig{ + User: config.SSHBastionUsername, + Auth: auth, + }, nil +} From 889b5b610595a6ca984f8140bb413fda8749263e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:21:04 +0200 Subject: [PATCH 460/956] website: update vsphere --- .../source/docs/post-processors/vsphere.html.markdown | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown index d49683a43..a6f790bc1 100644 --- a/website/source/docs/post-processors/vsphere.html.markdown +++ b/website/source/docs/post-processors/vsphere.html.markdown @@ -25,14 +25,17 @@ Required: * `datacenter` (string) - The name of the datacenter within vSphere to add the VM to. +* `datastore` (string) - The name of the datastore to store this VM. + This is _not required_ if `resource_pool` is specified. + * `host` (string) - The vSphere host that will be contacted to perform the VM upload. * `password` (string) - Password to use to authenticate to the vSphere endpoint. -* `resource_pool` (string) - The resource pool to upload the VM to. This can be - " " if you do not have resource pools configured +* `resource_pool` (string) - The resource pool to upload the VM to. + This is _not required_ if `datastore` is specified. * `username` (string) - The username to use to authenticate to the vSphere endpoint. @@ -41,8 +44,6 @@ Required: Optional: -* `datastore` (string) - The name of the datastore to store this VM. - * `disk_mode` (string) - Target disk format. See `ovftool` manual for available options. By default, "thick" will be used. 
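The bastion support added in PATCH 459 above reduces to a two-hop dial: open a normal SSH connection to the bastion, then ask that client to dial the final host, and wrap both so a single Close tears down the pair. Here is a minimal standalone sketch of those two hops; the host names, user, and password are placeholder values, not anything from the patch:

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// First hop: an ordinary SSH session to the bastion. (Newer versions
	// of x/crypto/ssh also require a HostKeyCallback here.)
	bConf := &ssh.ClientConfig{
		User: "jump",
		Auth: []ssh.AuthMethod{ssh.Password("placeholder")},
	}
	bastion, err := ssh.Dial("tcp", "bastion.example.com:22", bConf)
	if err != nil {
		log.Fatalf("Error connecting to bastion: %s", err)
	}
	defer bastion.Close()

	// Second hop: the bastion dials the target for us. The returned
	// net.Conn is what BastionConnectFunc hands back, and it can then
	// carry the real SSH handshake to the build machine.
	conn, err := bastion.Dial("tcp", "10.0.0.5:22")
	if err != nil {
		log.Fatalf("Error dialing target through bastion: %s", err)
	}
	defer conn.Close()

	log.Printf("Tunnel established to %s", conn.RemoteAddr())
}
```

In template terms, this two-hop path is what the new `ssh_bastion_host`, `ssh_bastion_port`, `ssh_bastion_username`, and `ssh_bastion_password` / `ssh_bastion_private_key_file` keys configure.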
From 8401177363df6181c440273d5e9bedbd015b6bdf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:21:30 +0200 Subject: [PATCH 461/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a495a3b4d..244006486 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,8 @@ IMPROVEMENTS: * post-processor/docker-save: Can be chained [GH-2179] * post-processor/docker-tag: Support `force` option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] + * post-processor/vsphere: Make more fields optional, support empty + resource pools. [GH-1868] * provisioner/puppet-masterless: `working_directory` option [GH-1831] * provisioner/puppet-masterless: `packer_build_name` and `packer_build_type` are default facts. [GH-1878] From b20e26be17b56e893d9b29945441041824d0e110 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:30:57 +0200 Subject: [PATCH 462/956] website: update docs for ami_groups clarification [GH-1068] --- website/source/docs/builders/amazon-ebs.html.markdown | 1 + website/source/docs/builders/amazon-instance.html.markdown | 1 + 2 files changed, 2 insertions(+) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 6c7840575..5420c10fd 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -88,6 +88,7 @@ each category, the available configuration keys are alphabetized. * `ami_groups` (array of strings) - A list of groups that have access to launch the resulting AMI(s). By default no groups have permission to launch the AMI. `all` will make the AMI publicly accessible. + AWS currently doesn't accept any value other than "all". * `ami_product_codes` (array of strings) - A list of product codes to associate with the AMI. By default no product codes are associated with diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index ff9e7c9a2..bacb5ee58 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -107,6 +107,7 @@ each category, the available configuration keys are alphabetized. * `ami_groups` (array of strings) - A list of groups that have access to launch the resulting AMI(s). By default no groups have permission to launch the AMI. `all` will make the AMI publicly accessible. + AWS currently doesn't accept any value other than "all". * `ami_product_codes` (array of strings) - A list of product codes to associate with the AMI.
By default no product codes are associated with From 6cdc17dda4ada1a7c9e85ed27d60502d4b2e2a57 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:33:59 +0200 Subject: [PATCH 463/956] helper/communicator: default bastion PK to normal PK --- helper/communicator/config.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 4d10bbbb6..e3da09618 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -86,6 +86,10 @@ func (c *Config) prepareSSH(ctx *interpolate.Context) []error { if c.SSHBastionPort == 0 { c.SSHBastionPort = 22 } + + if c.SSHBastionPrivateKey == "" && c.SSHPrivateKey != "" { + c.SSHBastionPrivateKey = c.SSHPrivateKey + } } // Validation From ac510cd84581418cdeedd33c49c2c9eb53b0b1d2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 17 Jun 2015 22:36:44 +0200 Subject: [PATCH 464/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 244006486..56cbb1eed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ FEATURES: connections. Note that provisioners won't work if this is done. [GH-1591] * **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled to allow access to remote servers such as private git repos. [GH-1066] + * **SSH Bastion Hosts:** You can now specify a bastion host for + SSH access (works with all builders). [GH-387] * **OpenStack v3 Identity:** The OpenStack builder now supports the v3 identity API. * **Docker builder supports SSH**: The Docker builder now supports containers From b77fcd90f31e7e31a2c2f3f1295f8a8c05f58cbb Mon Sep 17 00:00:00 2001 From: Bob Kuo Date: Wed, 17 Jun 2015 14:31:55 -0500 Subject: [PATCH 465/956] Force qemu to use a VNC port by setting vnc_min_port == vnc_max_port Similar to Issue #1288, this prevents a crash when we set the VNC minimum port equivalent to the VNC maximum port --- builder/qemu/step_configure_vnc.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/builder/qemu/step_configure_vnc.go b/builder/qemu/step_configure_vnc.go index be452620d..913175dd6 100644 --- a/builder/qemu/step_configure_vnc.go +++ b/builder/qemu/step_configure_vnc.go @@ -32,7 +32,12 @@ func (stepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction { var vncPort uint portRange := int(config.VNCPortMax - config.VNCPortMin) for { - vncPort = uint(rand.Intn(portRange)) + config.VNCPortMin + if portRange > 0 { + vncPort = uint(rand.Intn(portRange)) + config.VNCPortMin + } else { + vncPort = config.VNCPortMin + } + log.Printf("Trying port: %d", vncPort) l, err := net.Listen("tcp", fmt.Sprintf(":%d", vncPort)) if err == nil { From b7dab2689a8fa5e3510e9d459905a2ce8277ab74 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 05:23:04 +0200 Subject: [PATCH 466/956] fmt --- builder/amazon/common/block_device_test.go | 4 ++-- builder/qemu/step_configure_vnc.go | 4 ++-- builder/vmware/common/step_compact_disk.go | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 12d1530bf..b99a22747 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -38,8 +38,8 @@ func TestBlockDevice(t *testing.T) { }, { Config: &BlockDevice{ - DeviceName: "/dev/sdb", - VolumeSize: 8, + DeviceName: "/dev/sdb", + VolumeSize: 8, }, Result: &ec2.BlockDeviceMapping{ diff 
--git a/builder/qemu/step_configure_vnc.go b/builder/qemu/step_configure_vnc.go index 913175dd6..e24c4f9ca 100644 --- a/builder/qemu/step_configure_vnc.go +++ b/builder/qemu/step_configure_vnc.go @@ -33,11 +33,11 @@ func (stepConfigureVNC) Run(state multistep.StateBag) multistep.StepAction { portRange := int(config.VNCPortMax - config.VNCPortMin) for { if portRange > 0 { - vncPort = uint(rand.Intn(portRange)) + config.VNCPortMin + vncPort = uint(rand.Intn(portRange)) + config.VNCPortMin } else { vncPort = config.VNCPortMin } - + log.Printf("Trying port: %d", vncPort) l, err := net.Listen("tcp", fmt.Sprintf(":%d", vncPort)) if err == nil { diff --git a/builder/vmware/common/step_compact_disk.go b/builder/vmware/common/step_compact_disk.go index 4319b7fe9..9a81c98c8 100644 --- a/builder/vmware/common/step_compact_disk.go +++ b/builder/vmware/common/step_compact_disk.go @@ -36,11 +36,11 @@ func (s StepCompactDisk) Run(state multistep.StateBag) multistep.StepAction { state.Put("error", fmt.Errorf("Error compacting disk: %s", err)) return multistep.ActionHalt } - + if state.Get("additional_disk_paths") != nil { if moreDisks := state.Get("additional_disk_paths").([]string); len(moreDisks) > 0 { for i, path := range moreDisks { - ui.Say(fmt.Sprintf("Compacting additional disk image %d",i+1)) + ui.Say(fmt.Sprintf("Compacting additional disk image %d", i+1)) if err := driver.CompactDisk(path); err != nil { state.Put("error", fmt.Errorf("Error compacting additional disk %d: %s", i+1, err)) return multistep.ActionHalt From 8fdb4f77e09eba301713f70d132ca3b7c8c0b3c2 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 00:47:33 -0700 Subject: [PATCH 467/956] WIP 2/4 tests passing, still need to re-implement ZIP and bare compression files and do some cleanup --- post-processor/compress/post-processor.go | 322 ++++++++---------- .../compress/post-processor_test.go | 140 ++++++++ .../post-processors/compress.html.markdown | 39 ++- 3 files changed, 323 insertions(+), 178 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index b08465a66..42cea2d35 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -3,19 +3,14 @@ package compress import ( "archive/tar" "archive/zip" - "compress/flate" "compress/gzip" "fmt" "io" "os" "path/filepath" + "regexp" "runtime" - "strings" - "time" - "gopkg.in/yaml.v2" - - "github.com/biogo/hts/bgzf" "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" @@ -24,24 +19,13 @@ import ( "github.com/pierrec/lz4" ) -type Metadata map[string]Metaitem - -type Metaitem struct { - CompSize int64 `yaml:"compsize"` - OrigSize int64 `yaml:"origsize"` - CompType string `yaml:"comptype"` - CompDate string `yaml:"compdate"` -} - type Config struct { common.PackerConfig `mapstructure:",squash"` OutputPath string `mapstructure:"output"` - OutputFile string `mapstructure:"file"` - Compression int `mapstructure:"compression"` - Metadata bool `mapstructure:"metadata"` - NumCPU int `mapstructure:"numcpu"` - Format string `mapstructure:"format"` + Level int `mapstructure:"level"` KeepInputArtifact bool `mapstructure:"keep_input_artifact"` + Archive string + Algorithm string ctx *interpolate.Context } @@ -49,8 +33,52 @@ type PostProcessor struct { config Config } +// ErrInvalidCompressionLevel is returned when the compression level passed to +// gzip is not in the expected range. See compress/flate for details. 
+var ErrInvalidCompressionLevel = fmt.Errorf( + "Invalid compression level. Expected an integer from -1 to 9.") + +var ErrWrongInputCount = fmt.Errorf( + "Can only have 1 input file when not using tar/zip") + +func detectFromFilename(config *Config) error { + re := regexp.MustCompile("^.+?(?:\\.([a-z0-9]+))?\\.([a-z0-9]+)$") + + extensions := map[string]string{ + "tar": "tar", + "zip": "zip", + "gz": "pgzip", + "lz4": "lz4", + } + + result := re.FindAllString(config.OutputPath, -1) + + // Should we make an archive? E.g. tar or zip? + if result[0] == "tar" { + config.Archive = "tar" + } + if result[1] == "zip" || result[1] == "tar" { + config.Archive = result[1] + // Tar or zip is our final artifact. Bail out. + return nil + } + + // Should we compress the artifact? + algorithm, ok := extensions[result[1]] + if ok { + config.Algorithm = algorithm + // We found our compression algorithm something. Bail out. + return nil + } + + // We didn't find anything. Default to tar + pgzip + config.Algorithm = "pgzip" + config.Archive = "tar" + return fmt.Errorf("Unable to detect compression algorithm") +} + func (p *PostProcessor) Configure(raws ...interface{}) error { - p.config.Compression = -1 + p.config.Level = -1 err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ @@ -73,19 +101,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { "output": &p.config.OutputPath, } - if p.config.Compression > flate.BestCompression { - p.config.Compression = flate.BestCompression + if p.config.Level > gzip.BestCompression { + p.config.Level = gzip.BestCompression } - if p.config.Compression == -1 { - p.config.Compression = flate.DefaultCompression + if p.config.Level == -1 { + p.config.Level = gzip.DefaultCompression } - if p.config.NumCPU < 1 { - p.config.NumCPU = runtime.NumCPU() - } - - runtime.GOMAXPROCS(p.config.NumCPU) - for key, ptr := range templates { if *ptr == "" { errs = packer.MultiErrorAppend( @@ -107,123 +129,113 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } -func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { - // layout shows by example how the reference time should be represented. 
- const layout = "2006-01-02_15-04-05" - t := time.Now() - - if !p.config.Metadata { - return metadata - } - for _, f := range files { - if fi, err := os.Stat(f); err != nil { - continue - } else { - if i, ok := metadata[filepath.Base(f)]; !ok { - metadata[filepath.Base(f)] = Metaitem{CompType: p.config.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} - } else { - i.CompSize = fi.Size() - i.CompDate = t.Format(layout) - metadata[filepath.Base(f)] = i - } - } - } - return metadata -} - func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - newartifact := &Artifact{Path: p.config.OutputPath} - metafile := filepath.Join(p.config.OutputPath, "metadata") - ui.Say(fmt.Sprintf("[CBEDNARSKI] Creating archive at %s", newartifact.Path)) - _, err := os.Stat(newartifact.Path) - if err == nil { - return nil, false, fmt.Errorf("output dir %s must not exists", newartifact.Path) - } - err = os.MkdirAll(newartifact.Path, 0755) + newArtifact := &Artifact{Path: p.config.OutputPath} + + outputFile, err := os.Create(p.config.OutputPath) if err != nil { - return nil, false, fmt.Errorf("failed to create output: %s", err) + return nil, false, fmt.Errorf( + "Unable to create archive %s: %s", p.config.OutputPath, err) } + defer outputFile.Close() - p.config.Format += "tar.gzip" - formats := strings.Split(p.config.Format, ".") - ui.Say(fmt.Sprintf("[CBEDNARSKI] Formats length %d", len(formats))) - if len(p.config.Format) == 0 { - ui.Say("[CBEDNARSKI] Formats is empty") - formats[0] = "tar.gzip" - } - files := artifact.Files() - - metadata := make(Metadata, 0) - metadata = p.fillMetadata(metadata, files) - - ui.Say(fmt.Sprintf("[CBEDNARSKI] Formats %#v", formats)) - - for _, compress := range formats { - switch compress { - case "tar": - files, err = p.cmpTAR(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "zip": - files, err = p.cmpZIP(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "pgzip": - files, err = p.cmpPGZIP(files, p.config.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "gzip": - files, err = p.cmpGZIP(files, p.config.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "bgzf": - files, err = p.cmpBGZF(files, p.config.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "lz4": - files, err = p.cmpLZ4(files, p.config.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "e2fs": - files, err = p.cmpE2FS(files, filepath.Join(p.config.OutputPath, p.config.OutputFile)) - metadata = p.fillMetadata(metadata, files) + // Setup output interface. If we're using compression, output is a + // compression writer. Otherwise it's just a file. 
+ var output io.WriteCloser + switch p.config.Algorithm { + case "lz4": + lzwriter := lz4.NewWriter(outputFile) + if p.config.Level > gzip.DefaultCompression { + lzwriter.Header.HighCompression = true } + defer lzwriter.Close() + output = lzwriter + case "pgzip": + output, err = pgzip.NewWriterLevel(outputFile, p.config.Level) if err != nil { - return nil, false, fmt.Errorf("Failed to compress: %s", err) + return nil, false, ErrInvalidCompressionLevel } + defer output.Close() + default: + output = outputFile } - if p.config.Metadata { - fp, err := os.Create(metafile) + //Archive + switch p.config.Archive { + case "tar": + archiveTar(artifact.Files(), output) + case "zip": + archive := zip.NewWriter(output) + defer archive.Close() + default: + // We have a regular file, so we'll just do an io.Copy + if len(artifact.Files()) != 1 { + return nil, false, fmt.Errorf( + "Can only have 1 input file when not using tar/zip. Found %d "+ + "files: %v", len(artifact.Files()), artifact.Files()) + } + source, err := os.Open(artifact.Files()[0]) if err != nil { - return nil, false, err - } - if buf, err := yaml.Marshal(metadata); err != nil { - fp.Close() - return nil, false, err - } else { - if _, err = fp.Write(buf); err != nil { - fp.Close() - return nil, false, err - } - fp.Close() + return nil, false, fmt.Errorf( + "Failed to open source file %s for reading: %s", + artifact.Files()[0], err) } + defer source.Close() + io.Copy(output, source) } - newartifact.files = append(newartifact.files, files...) - if p.config.Metadata { - newartifact.files = append(newartifact.files, metafile) - } - - return newartifact, p.config.KeepInputArtifact, nil + return newArtifact, p.config.KeepInputArtifact, nil } -func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) +func archiveTar(files []string, output io.WriteCloser) error { + archive := tar.NewWriter(output) + defer archive.Close() + + for _, path := range files { + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("Unable to read file %s: %s", path, err) + } + defer file.Close() + + fi, err := file.Stat() + if err != nil { + return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err) + } + + target, err := os.Readlink(path) + if err != nil { + return fmt.Errorf("Failed to readlink for %s: %s", path, err) + } + + header, err := tar.FileInfoHeader(fi, target) + if err != nil { + return fmt.Errorf("Failed to create tar header for %s: %s", path, err) + } + + if err := archive.WriteHeader(header); err != nil { + return fmt.Errorf("Failed to write tar header for %s: %s", path, err) + } + + if _, err := io.Copy(archive, file); err != nil { + return fmt.Errorf("Failed to copy %s data to archive: %s", path, err) + } + } + return nil +} + +func (p *PostProcessor) cmpTAR(files []string, target string) ([]string, error) { + fw, err := os.Create(target) if err != nil { - return nil, fmt.Errorf("tar error creating tar %s: %s", dst, err) + return nil, fmt.Errorf("tar error creating tar %s: %s", target, err) } defer fw.Close() tw := tar.NewWriter(fw) defer tw.Close() - for _, name := range src { + for _, name := range files { fi, err := os.Stat(name) if err != nil { return nil, fmt.Errorf("tar error on stat of %s: %s", name, err) @@ -250,18 +262,18 @@ func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { } fr.Close() } - return []string{dst}, nil + return []string{target}, nil } -func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { +func (p *PostProcessor) 
cmpGZIP(files []string, target string) ([]string, error) { var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) + for _, name := range files { + filename := filepath.Join(target, filepath.Base(name)) fw, err := os.Create(filename) if err != nil { - return nil, fmt.Errorf("gzip error: %s", err) + return nil, fmt.Errorf("gzip error creating archive: %s", err) } - cw, err := gzip.NewWriterLevel(fw, p.config.Compression) + cw, err := gzip.NewWriterLevel(fw, p.config.Level) if err != nil { fw.Close() return nil, fmt.Errorf("gzip error: %s", err) @@ -286,15 +298,16 @@ func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { return res, nil } -func (p *PostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { +func (p *PostProcessor) cmpPGZIP(files []string, target string) ([]string, error) { var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) + for _, name := range files { + filename := filepath.Join(target, filepath.Base(name)) fw, err := os.Create(filename) if err != nil { return nil, fmt.Errorf("pgzip error: %s", err) } - cw, err := pgzip.NewWriterLevel(fw, p.config.Compression) + cw, err := pgzip.NewWriterLevel(fw, p.config.Level) + cw.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) if err != nil { fw.Close() return nil, fmt.Errorf("pgzip error: %s", err) @@ -332,7 +345,7 @@ func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { fw.Close() return nil, fmt.Errorf("lz4 error: %s", err) } - if p.config.Compression > flate.DefaultCompression { + if p.config.Level > gzip.DefaultCompression { cw.Header.HighCompression = true } fr, err := os.Open(name) @@ -355,43 +368,6 @@ func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { return res, nil } -func (p *PostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - - cw, err := bgzf.NewWriterLevel(fw, p.config.Compression, runtime.NumCPU()) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { - panic("not implemented") -} - func (p *PostProcessor) cmpZIP(src []string, dst string) ([]string, error) { fw, err := os.Create(dst) if err != nil { diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 12faeabed..6d28a6698 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -83,6 +83,111 @@ func TestSimpleCompress(t *testing.T) { } } +func TestZipArchive(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(tarTestCase)) 
+ if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to archive artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + _, err = os.Stat("package.zip") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + +func TestTarArchive(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(tarTestCase)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to archive artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + _, err = os.Stat("package.tar") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + +func TestCompressOptions(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(zipTestCase)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to archive artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + _, err = os.Stat("package.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + const simpleTestCase = ` { "post-processors": [ @@ -93,3 +198,38 @@ const simpleTestCase = ` ] } ` + +const zipTestCase = ` +{ + "post-processors": [ + { + "type": "compress", + "output": "package.zip" + } + ] +} +` + +const tarTestCase = ` +{ + "post-processors": [ + { + "type": "compress", + "output": "package.tar" + } + ] +} +` + +const optionsTestCase = ` +{ + "post-processors": [ + { + "type": "compress", + "output": "package.gz", + "level": 9, + "parallel": false + } + ] +} +` diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index ea3b9c7ac..6f1430e2e 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -15,17 +15,46 @@ archive. ## Configuration -The configuration for this post-processor is extremely simple. +The minimal required configuration is to specify the output file. This will create a gzipped tarball. -* `output` (string) - The path to save the compressed archive. +* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file. 
+ + If the extension can't be detected tar+gzip will be used as a fallback. + +If you want more control over how the archive is created you can specify the following settings: + +* `level` (optional, integer) - Specify the compression level, for algorithms that support it. Value from -1 through 9 inclusive. 9 offers the smallest file size, but takes longer +* `keep_input_artifact` (optional, bool) - Keep source files; defaults to false + +## Supported Formats + +Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. ## Example -An example is shown below, showing only the post-processor configuration: +Some minimal examples are shown below, showing only the post-processor configuration: -```javascript +```json { "type": "compress", - "output": "foo.tar.gz" + "output": "archive.tar.gz" +} +``` + +```json +{ + "type": "compress", + "output": "archive.zip" +} +``` + +A more complex example, again showing only the post-processor configuration: + +```json +{ + "type": "compress", + "output": "archive.gz", + "compression": 9, + "parallel": false } ``` From 6fbf4147cde3fbbd1196698515436b77ea184ce1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 10:12:33 +0200 Subject: [PATCH 468/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56cbb1eed..f3b9a0aed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ BUG FIXES: * builder/amazon: Improved retry logic around waiting for instances. [GH-1764] * builder/amazon: Fix issues with creating Block Devices. [GH-2195] * builder/amazon/chroot: Retry waiting for disk attachments [GH-2046] + * builder/amazon/chroot: Only unmount path if it is mounted [GH-2054] * builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930] * builder/amazon/instance: Use `--region` flag for bundle upload command. [GH-1931] * builder/digitalocean: Wait for droplet to unlock before changing state, From 40cb558f2fa598f2e1fa6d094154169fc980a2a9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 10:15:53 +0200 Subject: [PATCH 469/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3b9a0aed..e100ad128 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ IMPROVEMENTS: * builder/amazon: Support custom keypairs [GH-1837] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] + * builder/googlecompute: Option to use internal IP for connections. [GH-2152] * builder/parallels: Support Parallels Desktop 11 [GH-2199] * builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for RackConnect data to appear From d9fceaf39d315fc81572da883dbee56591325eef Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 10:19:46 +0200 Subject: [PATCH 470/956] update CHANGELOG --- CHANGELOG.md | 2 ++ builder/virtualbox/common/step_forward_ssh.go | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e100ad128..985c4d4fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ BUG FIXES: to retrieve the SSH IP from. [GH-2220] * builder/qemu: Add `disk_discard` option [GH-2120] * builder/qemu: Use proper SSH port, not hardcoded to 22. [GH-2236] + * builder/qemu: Find unused SSH port if SSH port is taken. 
[GH-2032] * builder/virtualbox: Bind HTTP server to IPv4, which is more compatible with OS installers. [GH-1709] * builder/virtualbox: Remove the floppy controller in addition to the @@ -126,6 +127,7 @@ BUG FIXES: ".iso" extension didn't work. [GH-1839] * builder/virtualbox: Output dir is verified at runtime, not template validation time. [GH-2233] + * builder/virtualbox: Find unused SSH port if SSH port is taken. [GH-2032] * builder/vmware: Add 100ms delay between keystrokes to avoid subtle timing issues in most cases. [GH-1663] * builder/vmware: Bind HTTP server to IPv4, which is more compatible with diff --git a/builder/virtualbox/common/step_forward_ssh.go b/builder/virtualbox/common/step_forward_ssh.go index a33efc977..86376c834 100644 --- a/builder/virtualbox/common/step_forward_ssh.go +++ b/builder/virtualbox/common/step_forward_ssh.go @@ -46,10 +46,10 @@ func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction { } for { - sshHostPort = offset + s.HostPortMin - if sshHostPort >= s.HostPortMax { + sshHostPort = offset + int(s.HostPortMin) + if sshHostPort >= int(s.HostPortMax) { offset = 0 - sshHostPort = s.HostPortMin + sshHostPort = int(s.HostPortMin) } log.Printf("Trying port: %d", sshHostPort) l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort)) From f6660e8a4f9b2433a735e3b36bb3a126da125ae7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 10:25:47 +0200 Subject: [PATCH 471/956] post-processor/vagrant-cloud: retry uploads [GH-2167] --- post-processor/vagrant-cloud/step_upload.go | 35 ++++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/post-processor/vagrant-cloud/step_upload.go b/post-processor/vagrant-cloud/step_upload.go index f82f125f8..2bada80b8 100644 --- a/post-processor/vagrant-cloud/step_upload.go +++ b/post-processor/vagrant-cloud/step_upload.go @@ -2,6 +2,8 @@ package vagrantcloud import ( "fmt" + "time" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" ) @@ -17,13 +19,38 @@ func (s *stepUpload) Run(state multistep.StateBag) multistep.StepAction { url := upload.UploadPath ui.Say(fmt.Sprintf("Uploading box: %s", artifactFilePath)) + ui.Message( + "Depending on your internet connection and the size of the box,\n" + + "this may take some time") - ui.Message("Depending on your internet connection and the size of the box, this may take some time") + var finalErr error + for i := 0; i < 3; i++ { + if i > 0 { + ui.Message(fmt.Sprintf("Uploading box, attempt %d", i+1)) + } - resp, err := client.Upload(artifactFilePath, url) + resp, err := client.Upload(artifactFilePath, url) + if err != nil { + finalErr = err + ui.Message(fmt.Sprintf( + "Error uploading box! Will retry in 10 seconds. Error: %s", err)) + time.Sleep(10 * time.Second) + continue + } + if resp.StatusCode != 200 { + finalErr = fmt.Errorf("bad HTTP status: %d", resp.StatusCode) + ui.Message(fmt.Sprintf( + "Error uploading box! Will retry in 10 seconds. 
Status: %d", + resp.StatusCode)) + time.Sleep(10 * time.Second) + continue + } - if err != nil || (resp.StatusCode != 200) { - state.Put("error", fmt.Errorf("Error uploading Box: %s", err)) + finalErr = nil + break + } + + if finalErr != nil { + state.Put("error", finalErr) return multistep.ActionHalt } From 4463083a605ff98d973af461afd6cca7e628c139 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 18 Jun 2015 10:38:23 +0200 Subject: [PATCH 472/956] provisioner/shell: randomize default script name --- provisioner/shell/provisioner.go | 6 +++--- provisioner/shell/provisioner_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 338092755..c2ae4d938 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "log" + "math/rand" "os" "strings" "time" @@ -19,8 +20,6 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) -const DefaultRemotePath = "/tmp/script.sh" - type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -102,7 +101,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.RemotePath == "" { - p.config.RemotePath = DefaultRemotePath + p.config.RemotePath = fmt.Sprintf( + "/tmp/script_%d.sh", rand.Intn(9999)) } if p.config.Scripts == nil { diff --git a/provisioner/shell/provisioner_test.go b/provisioner/shell/provisioner_test.go index 54c41c956..165490e2a 100644 --- a/provisioner/shell/provisioner_test.go +++ b/provisioner/shell/provisioner_test.go @@ -30,7 +30,7 @@ func TestProvisionerPrepare_Defaults(t *testing.T) { t.Fatalf("err: %s", err) } - if p.config.RemotePath != DefaultRemotePath { + if p.config.RemotePath == "" { t.Errorf("unexpected remote path: %s", p.config.RemotePath) } } From f7af571cd95718204c286b53588bffd5bd12e634 Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Thu, 18 Jun 2015 12:04:02 +0300 Subject: [PATCH 473/956] builder/parallels: Add "SetDefaultConfiguration" function This function applies the default configuration to the virtual machine. Also, it disables some integration features which should not be present in the resulting VM image. The functions differ between the PD 9 and PD 10 structs because some additional options appeared only in the Parallels Desktop 10 release. --- builder/parallels/common/driver.go | 3 +++ builder/parallels/common/driver_10.go | 24 ++++++++++++++++++++++++ builder/parallels/common/driver_9.go | 19 +++++++++++++++++++ 3 files changed, 46 insertions(+) diff --git a/builder/parallels/common/driver.go b/builder/parallels/common/driver.go index 5c3d93c09..03a4e0f09 100644 --- a/builder/parallels/common/driver.go +++ b/builder/parallels/common/driver.go @@ -44,6 +44,9 @@ type Driver interface { // Send scancodes to the vm using the prltype python script.
SendKeyScanCodes(string, ...string) error + // Apply default сonfiguration settings to the virtual machine + SetDefaultConfiguration(string) error + // Finds the MAC address of the NIC nic0 Mac(string) (string, error) diff --git a/builder/parallels/common/driver_10.go b/builder/parallels/common/driver_10.go index 9ab0754de..c37d421dc 100644 --- a/builder/parallels/common/driver_10.go +++ b/builder/parallels/common/driver_10.go @@ -5,3 +5,27 @@ package common type Parallels10Driver struct { Parallels9Driver } + +func (d *Parallels10Driver) SetDefaultConfiguration(vmName string) error { + commands := make([][]string, 12) + commands[0] = []string{"set", vmName, "--cpus", "1"} + commands[1] = []string{"set", vmName, "--memsize", "512"} + commands[2] = []string{"set", vmName, "--startup-view", "same"} + commands[3] = []string{"set", vmName, "--on-shutdown", "close"} + commands[4] = []string{"set", vmName, "--on-window-close", "keep-running"} + commands[5] = []string{"set", vmName, "--auto-share-camera", "off"} + commands[6] = []string{"set", vmName, "--smart-guard", "off"} + commands[7] = []string{"set", vmName, "--shared-cloud", "off"} + commands[8] = []string{"set", vmName, "--shared-profile", "off"} + commands[9] = []string{"set", vmName, "--smart-mount", "off"} + commands[10] = []string{"set", vmName, "--sh-app-guest-to-host", "off"} + commands[11] = []string{"set", vmName, "--sh-app-host-to-guest", "off"} + + for _, command := range commands { + err := d.Prlctl(command...) + if err != nil { + return err + } + } + return nil +} diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index 98d36cc24..c577151dc 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -255,6 +255,25 @@ func prepend(head string, tail []string) []string { return tmp } +func (d *Parallels9Driver) SetDefaultConfiguration(vmName string) error { + commands := make([][]string, 7) + commands[0] = []string{"set", vmName, "--cpus", "1"} + commands[1] = []string{"set", vmName, "--memsize", "512"} + commands[2] = []string{"set", vmName, "--startup-view", "same"} + commands[3] = []string{"set", vmName, "--on-shutdown", "close"} + commands[4] = []string{"set", vmName, "--on-window-close", "keep-running"} + commands[5] = []string{"set", vmName, "--auto-share-camera", "off"} + commands[6] = []string{"set", vmName, "--smart-guard", "off"} + + for _, command := range commands { + err := d.Prlctl(command...) + if err != nil { + return err + } + } + return nil +} + func (d *Parallels9Driver) Mac(vmName string) (string, error) { var stdout bytes.Buffer From 2860bfdf82eb84549f7f43619abd998144774593 Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Thu, 18 Jun 2015 12:06:49 +0300 Subject: [PATCH 474/956] builder/parallels: Apply default settings on the VM creation step. 
--- builder/parallels/iso/step_create_vm.go | 40 +++++++++++-------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/builder/parallels/iso/step_create_vm.go b/builder/parallels/iso/step_create_vm.go index ebe7effa8..ca8c7c44e 100644 --- a/builder/parallels/iso/step_create_vm.go +++ b/builder/parallels/iso/step_create_vm.go @@ -23,37 +23,33 @@ func (s *stepCreateVM) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) name := config.VMName - commands := make([][]string, 8) - commands[0] = []string{ + command := []string{ "create", name, "--distribution", config.GuestOSType, "--dst", config.OutputDir, "--vmtype", "vm", "--no-hdd", } - commands[1] = []string{"set", name, "--cpus", "1"} - commands[2] = []string{"set", name, "--memsize", "512"} - commands[3] = []string{"set", name, "--startup-view", "same"} - commands[4] = []string{"set", name, "--on-shutdown", "close"} - commands[5] = []string{"set", name, "--on-window-close", "keep-running"} - commands[6] = []string{"set", name, "--auto-share-camera", "off"} - commands[7] = []string{"set", name, "--smart-guard", "off"} ui.Say("Creating virtual machine...") - for _, command := range commands { - err := driver.Prlctl(command...) - ui.Say(fmt.Sprintf("Executing: prlctl %s", command)) - if err != nil { - err := fmt.Errorf("Error creating VM: %s", err) - state.Put("error", err) - ui.Error(err.Error()) - return multistep.ActionHalt - } + if err := driver.Prlctl(command...); err != nil { + err := fmt.Errorf("Error creating VM: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } - // Set the VM name property on the first command - if s.vmName == "" { - s.vmName = name - } + ui.Say("Applying default settings...") + if err := driver.SetDefaultConfiguration(name); err != nil { + err := fmt.Errorf("Error VM configuration: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Set the VM name property on the first command + if s.vmName == "" { + s.vmName = name } // Set the final name in the state bag so others can use it From b767aa7f9945b8b7e311d1d367b4200fba2be5c2 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 03:55:51 -0700 Subject: [PATCH 475/956] Change to compression_level, fix and add tests for format detection --- post-processor/compress/post-processor.go | 155 +++++++++--------- .../compress/post-processor_test.go | 52 +++++- .../post-processors/compress.html.markdown | 20 +-- 3 files changed, 137 insertions(+), 90 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 42cea2d35..5bceae8c8 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -22,27 +22,31 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` OutputPath string `mapstructure:"output"` - Level int `mapstructure:"level"` + CompressionLevel int `mapstructure:"compression_level"` KeepInputArtifact bool `mapstructure:"keep_input_artifact"` Archive string Algorithm string + UsingDefault bool ctx *interpolate.Context } type PostProcessor struct { - config Config + config *Config } -// ErrInvalidCompressionLevel is returned when the compression level passed to -// gzip is not in the expected range. See compress/flate for details. -var ErrInvalidCompressionLevel = fmt.Errorf( - "Invalid compression level. 
Expected an integer from -1 to 9.") +var ( + // ErrInvalidCompressionLevel is returned when the compression level passed + // to gzip is not in the expected range. See compress/flate for details. + ErrInvalidCompressionLevel = fmt.Errorf( + "Invalid compression level. Expected an integer from -1 to 9.") -var ErrWrongInputCount = fmt.Errorf( - "Can only have 1 input file when not using tar/zip") + ErrWrongInputCount = fmt.Errorf( + "Can only have 1 input file when not using tar/zip") -func detectFromFilename(config *Config) error { - re := regexp.MustCompile("^.+?(?:\\.([a-z0-9]+))?\\.([a-z0-9]+)$") + filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`) +) + +func (config *Config) detectFromFilename() { extensions := map[string]string{ "tar": "tar", @@ -51,34 +55,47 @@ func detectFromFilename(config *Config) error { "lz4": "lz4", } - result := re.FindAllString(config.OutputPath, -1) + result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1) + + if len(result) == 0 { + config.Algorithm = "pgzip" + config.Archive = "tar" + return + } // Should we make an archive? E.g. tar or zip? - if result[0] == "tar" { + var nextToLastItem string + if len(result) == 1 { + nextToLastItem = "" + } else { + nextToLastItem = result[len(result)-2][1] + } + + lastItem := result[len(result)-1][1] + if nextToLastItem == "tar" { config.Archive = "tar" } - if result[1] == "zip" || result[1] == "tar" { - config.Archive = result[1] + if lastItem == "zip" || lastItem == "tar" { + config.Archive = lastItem // Tar or zip is our final artifact. Bail out. - return nil + return } // Should we compress the artifact? - algorithm, ok := extensions[result[1]] + algorithm, ok := extensions[lastItem] if ok { config.Algorithm = algorithm - // We found our compression algorithm something. Bail out. - return nil + // We found our compression algorithm. Bail out. + return } // We didn't find anything. Default to tar + pgzip config.Algorithm = "pgzip" config.Archive = "tar" - return fmt.Errorf("Unable to detect compression algorithm") + return } func (p *PostProcessor) Configure(raws ...interface{}) error { - p.config.Level = -1 err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ @@ -86,6 +103,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { }, }, raws...) + fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel) + errs := new(packer.MultiError) if p.config.OutputPath == "" { @@ -101,13 +120,17 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { "output": &p.config.OutputPath, } - if p.config.Level > gzip.BestCompression { - p.config.Level = gzip.BestCompression + if p.config.CompressionLevel > pgzip.BestCompression { + p.config.CompressionLevel = pgzip.BestCompression } - if p.config.Level == -1 { - p.config.Level = gzip.DefaultCompression + // Technically 0 means "don't compress" but I don't know how to + // differentiate between "user entered zero" and "user entered nothing". + // Also, why bother creating a compressed file with zero compression? 
+ if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 { + p.config.CompressionLevel = pgzip.DefaultCompression } + fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel) for key, ptr := range templates { if *ptr == "" { errs = packer.MultiErrorAppend( @@ -121,6 +144,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } } + p.config.detectFromFilename() + if len(errs.Errors) > 0 { return errs } @@ -131,12 +156,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - newArtifact := &Artifact{Path: p.config.OutputPath} + target := p.config.OutputPath + newArtifact := &Artifact{Path: target} - outputFile, err := os.Create(p.config.OutputPath) + outputFile, err := os.Create(target) if err != nil { return nil, false, fmt.Errorf( - "Unable to create archive %s: %s", p.config.OutputPath, err) + "Unable to create archive %s: %s", target, err) } defer outputFile.Close() @@ -145,31 +171,44 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac var output io.WriteCloser switch p.config.Algorithm { case "lz4": + ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target)) lzwriter := lz4.NewWriter(outputFile) - if p.config.Level > gzip.DefaultCompression { + if p.config.CompressionLevel > gzip.DefaultCompression { lzwriter.Header.HighCompression = true } defer lzwriter.Close() output = lzwriter case "pgzip": - output, err = pgzip.NewWriterLevel(outputFile, p.config.Level) + ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target)) + gzipWriter, err := pgzip.NewWriterLevel(outputFile, p.config.CompressionLevel) if err != nil { return nil, false, ErrInvalidCompressionLevel } + gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) + output = gzipWriter defer output.Close() default: output = outputFile } - //Archive + compression := p.config.Algorithm + if compression == "" { + compression = "no" + } + + // Build an archive, if we're supposed to do that. switch p.config.Archive { case "tar": - archiveTar(artifact.Files(), output) + ui.Say(fmt.Sprintf("Taring %s with %s compression", target, compression)) + createTarArchive(artifact.Files(), output) case "zip": + ui.Say(fmt.Sprintf("Zipping %s", target)) archive := zip.NewWriter(output) defer archive.Close() default: - // We have a regular file, so we'll just do an io.Copy + ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression)) + // Filename indicates no tarball (just compress) so we'll do an io.Copy + // into our compressor. if len(artifact.Files()) != 1 { return nil, false, fmt.Errorf( "Can only have 1 input file when not using tar/zip. 
Found %d "+ @@ -185,10 +224,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac io.Copy(output, source) } + ui.Say(fmt.Sprintf("Archive %s completed", target)) + return newArtifact, p.config.KeepInputArtifact, nil } -func archiveTar(files []string, output io.WriteCloser) error { +func createTarArchive(files []string, output io.WriteCloser) error { archive := tar.NewWriter(output) defer archive.Close() @@ -225,44 +266,8 @@ func archiveTar(files []string, output io.WriteCloser) error { return nil } -func (p *PostProcessor) cmpTAR(files []string, target string) ([]string, error) { - fw, err := os.Create(target) - if err != nil { - return nil, fmt.Errorf("tar error creating tar %s: %s", target, err) - } - defer fw.Close() - - tw := tar.NewWriter(fw) - defer tw.Close() - - for _, name := range files { - fi, err := os.Stat(name) - if err != nil { - return nil, fmt.Errorf("tar error on stat of %s: %s", name, err) - } - - target, _ := os.Readlink(name) - header, err := tar.FileInfoHeader(fi, target) - if err != nil { - return nil, fmt.Errorf("tar error reading info for %s: %s", name, err) - } - - if err = tw.WriteHeader(header); err != nil { - return nil, fmt.Errorf("tar error writing header for %s: %s", name, err) - } - - fr, err := os.Open(name) - if err != nil { - return nil, fmt.Errorf("tar error opening file %s: %s", name, err) - } - - if _, err = io.Copy(tw, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("tar error copying contents of %s: %s", name, err) - } - fr.Close() - } - return []string{target}, nil +func createZipArchive(files []string, output io.WriteCloser) error { + return fmt.Errorf("Not implemented") } func (p *PostProcessor) cmpGZIP(files []string, target string) ([]string, error) { @@ -273,7 +278,7 @@ func (p *PostProcessor) cmpGZIP(files []string, target string) ([]string, error) if err != nil { return nil, fmt.Errorf("gzip error creating archive: %s", err) } - cw, err := gzip.NewWriterLevel(fw, p.config.Level) + cw, err := gzip.NewWriterLevel(fw, p.config.CompressionLevel) if err != nil { fw.Close() return nil, fmt.Errorf("gzip error: %s", err) @@ -306,8 +311,8 @@ func (p *PostProcessor) cmpPGZIP(files []string, target string) ([]string, error if err != nil { return nil, fmt.Errorf("pgzip error: %s", err) } - cw, err := pgzip.NewWriterLevel(fw, p.config.Level) - cw.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) + cw, err := pgzip.NewWriterLevel(fw, p.config.CompressionLevel) + if err != nil { fw.Close() return nil, fmt.Errorf("pgzip error: %s", err) @@ -345,7 +350,7 @@ func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { fw.Close() return nil, fmt.Errorf("lz4 error: %s", err) } - if p.config.Level > gzip.DefaultCompression { + if p.config.CompressionLevel > gzip.DefaultCompression { cw.Header.HighCompression = true } fr, err := os.Open(name) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 6d28a6698..f60a7846a 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -45,6 +45,48 @@ func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { return ui, artifact, err } +func TestDetectFilename(t *testing.T) { + // Test default / fallback with no file extension + nakedFilename := Config{OutputPath: "test"} + nakedFilename.detectFromFilename() + if nakedFilename.Archive != "tar" { + t.Error("Expected to find tar archive setting") + } + if nakedFilename.Algorithm != "pgzip" { + 
t.Error("Expected to find pgzip algorithm setting") + } + + // Test .archive + zipFilename := Config{OutputPath: "test.zip"} + zipFilename.detectFromFilename() + if zipFilename.Archive != "zip" { + t.Error("Expected to find zip archive setting") + } + if zipFilename.Algorithm != "" { + t.Error("Expected to find empty algorithm setting") + } + + // Test .compress + lz4Filename := Config{OutputPath: "test.lz4"} + lz4Filename.detectFromFilename() + if lz4Filename.Archive != "" { + t.Error("Expected to find empty archive setting") + } + if lz4Filename.Algorithm != "lz4" { + t.Error("Expected to find lz4 algorithm setting") + } + + // Test .archive.compress with some.extra.dots... + lotsOfDots := Config{OutputPath: "test.blah.bloo.blee.tar.lz4"} + lotsOfDots.detectFromFilename() + if lotsOfDots.Archive != "tar" { + t.Error("Expected to find tar archive setting") + } + if lotsOfDots.Algorithm != "lz4" { + t.Error("Expected to find lz4 algorithm setting") + } +} + func TestSimpleCompress(t *testing.T) { if os.Getenv(env.TestEnvVar) == "" { t.Skip(fmt.Sprintf( @@ -167,13 +209,18 @@ func TestCompressOptions(t *testing.T) { defer artifact.Destroy() } - tpl, err := template.Parse(strings.NewReader(zipTestCase)) + tpl, err := template.Parse(strings.NewReader(optionsTestCase)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } compressor := PostProcessor{} compressor.Configure(tpl.PostProcessors[0][0].Config) + + if compressor.config.CompressionLevel != 9 { + t.Errorf("Expected compression_level 9, got %d", compressor.config.CompressionLevel) + } + artifactOut, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to archive artifact: %s", err) @@ -227,8 +274,7 @@ const optionsTestCase = ` { "type": "compress", "output": "package.gz", - "level": 9, - "parallel": false + "compression_level": 9 } ] } diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 6f1430e2e..c5a05a937 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -10,25 +10,24 @@ description: |- Type: `compress` The Packer compress post-processor takes an artifact with files (such as from -VMware or VirtualBox) and gzip compresses the artifact into a single -archive. +VMware or VirtualBox) and compresses the artifact into a single archive. ## Configuration -The minimal required configuration is to specify the output file. This will create a gzipped tarball. +You must specify the output filename. The archive format is derived from the filename. -* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file. +* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file. If the extension can't be detected packer defaults to `.tar.gz` behavior but will not change the filename. - If the extension can't be detected tar+gzip will be used as a fallback. + If you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. 
If you want more control over how the archive is created you can specify the following settings:
 
-* `level` (optional, integer) - Specify the compression level, for algorithms that support it. Value from -1 through 9 inclusive. 9 offers the smallest file size, but takes longer
+* `compression_level` (optional, integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Default if omitted is 6
 * `keep_input_artifact` (optional, bool) - Keep source files; defaults to false
 
 ## Supported Formats
 
-Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`.
+Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to compress.
 
 ## Example
 
@@ -37,7 +36,7 @@ Some minimal examples are shown below, showing only the post-processor configura
 ```json
 {
   "type": "compress",
-  "output": "archive.tar.gz"
+  "output": "archive.tar.lz4"
 }
 ```
 
@@ -48,13 +47,10 @@ Some minimal examples are shown below, showing only the post-processor configura
 }
 ```
 
-A more complex example, again showing only the post-processor configuration:
-
 ```json
 {
   "type": "compress",
   "output": "archive.gz",
-  "compression": 9,
-  "parallel": false
+  "compression_level": 9
 }
 ```

From 0a53fbc29da151bdb09429c32ddf365ad9c7dc04 Mon Sep 17 00:00:00 2001
From: Mikhail Zholobov
Date: Thu, 18 Jun 2015 14:36:32 +0300
Subject: [PATCH 476/956] builder/parallels: Add mock for
 "SetDefaultConfiguration" method

Fixes unit test failures
---
 builder/parallels/common/driver_mock.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/builder/parallels/common/driver_mock.go b/builder/parallels/common/driver_mock.go
index 25e4ea182..5629a6db9 100644
--- a/builder/parallels/common/driver_mock.go
+++ b/builder/parallels/common/driver_mock.go
@@ -37,6 +37,9 @@ type DriverMock struct {
 	SendKeyScanCodesCalls [][]string
 	SendKeyScanCodesErrs  []error
 
+	SetDefaultConfigurationCalled bool
+	SetDefaultConfigurationError  error
+
 	ToolsIsoPathCalled bool
 	ToolsIsoPathFlavor string
 	ToolsIsoPathResult string
@@ -107,6 +110,11 @@ func (d *DriverMock) SendKeyScanCodes(name string, scancodes ...string) error {
 	return nil
 }
 
+func (d *DriverMock) SetDefaultConfiguration(name string) error {
+	d.SetDefaultConfigurationCalled = true
+	return d.SetDefaultConfigurationError
+}
+
 func (d *DriverMock) Mac(name string) (string, error) {
 	d.MacName = name
 	return d.MacReturn, d.MacError

From d8f78d9174bad365d4cd8aead5de23e748b9274a Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Thu, 18 Jun 2015 04:41:05 -0700
Subject: [PATCH 477/956] Cleanup

---
 post-processor/compress/post-processor.go | 308 ++++++++--------------
 1 file changed, 106 insertions(+), 202 deletions(-)

diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go
index 5bceae8c8..0ecc7db86 100644
--- a/post-processor/compress/post-processor.go
+++ b/post-processor/compress/post-processor.go
@@ -46,55 +46,6 @@ var (
 	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
 )
 
-func (config *Config) detectFromFilename() {
-
-	extensions := map[string]string{
-		"tar": "tar",
-		"zip": "zip",
-		"gz":  "pgzip",
-		"lz4": "lz4",
-	}
-
-	result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)
-
-	if len(result) == 0 {
-		config.Algorithm = "pgzip"
-		config.Archive = "tar"
-		return
-	}
-
-	// Should we make an archive? E.g. tar or zip?
- var nextToLastItem string - if len(result) == 1 { - nextToLastItem = "" - } else { - nextToLastItem = result[len(result)-2][1] - } - - lastItem := result[len(result)-1][1] - if nextToLastItem == "tar" { - config.Archive = "tar" - } - if lastItem == "zip" || lastItem == "tar" { - config.Archive = lastItem - // Tar or zip is our final artifact. Bail out. - return - } - - // Should we compress the artifact? - algorithm, ok := extensions[lastItem] - if ok { - config.Algorithm = algorithm - // We found our compression algorithm. Bail out. - return - } - - // We didn't find anything. Default to tar + pgzip - config.Algorithm = "pgzip" - config.Archive = "tar" - return -} - func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, @@ -157,6 +108,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { target := p.config.OutputPath + keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} outputFile, err := os.Create(target) @@ -172,20 +124,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac switch p.config.Algorithm { case "lz4": ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target)) - lzwriter := lz4.NewWriter(outputFile) - if p.config.CompressionLevel > gzip.DefaultCompression { - lzwriter.Header.HighCompression = true - } - defer lzwriter.Close() - output = lzwriter + output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel) + defer output.Close() case "pgzip": ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target)) - gzipWriter, err := pgzip.NewWriterLevel(outputFile, p.config.CompressionLevel) - if err != nil { - return nil, false, ErrInvalidCompressionLevel - } - gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) - output = gzipWriter + output, err = makePgzipWriter(outputFile, p.config.CompressionLevel) defer output.Close() default: output = outputFile @@ -199,34 +142,112 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Build an archive, if we're supposed to do that. switch p.config.Archive { case "tar": - ui.Say(fmt.Sprintf("Taring %s with %s compression", target, compression)) - createTarArchive(artifact.Files(), output) + ui.Say(fmt.Sprintf("Tarring %s with %s compression", target, compression)) + err = createTarArchive(artifact.Files(), output) + if err != nil { + return nil, keep, fmt.Errorf("Error creating tar: %s", err) + } case "zip": ui.Say(fmt.Sprintf("Zipping %s", target)) - archive := zip.NewWriter(output) - defer archive.Close() + err = createZipArchive(artifact.Files(), output) + if err != nil { + return nil, keep, fmt.Errorf("Error creating zip: %s", err) + } default: ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression)) // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { - return nil, false, fmt.Errorf( + return nil, keep, fmt.Errorf( "Can only have 1 input file when not using tar/zip. 
Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } + source, err := os.Open(artifact.Files()[0]) if err != nil { - return nil, false, fmt.Errorf( + return nil, keep, fmt.Errorf( "Failed to open source file %s for reading: %s", artifact.Files()[0], err) } defer source.Close() - io.Copy(output, source) + + if _, err = io.Copy(output, source); err != nil { + return nil, keep, fmt.Errorf("Failed to compress %s: %s", + artifact.Files()[0], err) + } } ui.Say(fmt.Sprintf("Archive %s completed", target)) - return newArtifact, p.config.KeepInputArtifact, nil + return newArtifact, keep, nil +} + +func (config *Config) detectFromFilename() { + + extensions := map[string]string{ + "tar": "tar", + "zip": "zip", + "gz": "pgzip", + "lz4": "lz4", + } + + result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1) + + // No dots. Bail out with defaults. + if len(result) == 0 { + config.Algorithm = "pgzip" + config.Archive = "tar" + return + } + + // Parse the last two .groups, if they're there + lastItem := result[len(result)-1][1] + var nextToLastItem string + if len(result) == 1 { + nextToLastItem = "" + } else { + nextToLastItem = result[len(result)-2][1] + } + + // Should we make an archive? E.g. tar or zip? + if nextToLastItem == "tar" { + config.Archive = "tar" + } + if lastItem == "zip" || lastItem == "tar" { + config.Archive = lastItem + // Tar or zip is our final artifact. Bail out. + return + } + + // Should we compress the artifact? + algorithm, ok := extensions[lastItem] + if ok { + config.Algorithm = algorithm + // We found our compression algorithm. Bail out. + return + } + + // We didn't match a known compression format. Default to tar + pgzip + config.Algorithm = "pgzip" + config.Archive = "tar" + return +} + +func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) { + lzwriter := lz4.NewWriter(output) + if compressionLevel > gzip.DefaultCompression { + lzwriter.Header.HighCompression = true + } + return lzwriter, nil +} + +func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) { + gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel) + if err != nil { + return nil, ErrInvalidCompressionLevel + } + gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) + return gzipWriter, nil } func createTarArchive(files []string, output io.WriteCloser) error { @@ -245,12 +266,7 @@ func createTarArchive(files []string, output io.WriteCloser) error { return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err) } - target, err := os.Readlink(path) - if err != nil { - return fmt.Errorf("Failed to readlink for %s: %s", path, err) - } - - header, err := tar.FileInfoHeader(fi, target) + header, err := tar.FileInfoHeader(fi, path) if err != nil { return fmt.Errorf("Failed to create tar header for %s: %s", path, err) } @@ -267,139 +283,27 @@ func createTarArchive(files []string, output io.WriteCloser) error { } func createZipArchive(files []string, output io.WriteCloser) error { - return fmt.Errorf("Not implemented") -} + archive := zip.NewWriter(output) + defer archive.Close() -func (p *PostProcessor) cmpGZIP(files []string, target string) ([]string, error) { - var res []string - for _, name := range files { - filename := filepath.Join(target, filepath.Base(name)) - fw, err := os.Create(filename) + for _, path := range files { + path = filepath.ToSlash(path) + + source, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("gzip error creating archive: %s", err) + return fmt.Errorf("Unable 
to read file %s: %s", path, err) } - cw, err := gzip.NewWriterLevel(fw, p.config.CompressionLevel) + defer source.Close() + + target, err := archive.Create(path) if err != nil { - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) + return fmt.Errorf("Failed to add zip header for %s: %s", path, err) } - fr, err := os.Open(name) + + _, err = io.Copy(target, source) if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) + return fmt.Errorf("Failed to copy %s data to archive: %s", path, err) } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) } - return res, nil -} - -func (p *PostProcessor) cmpPGZIP(files []string, target string) ([]string, error) { - var res []string - for _, name := range files { - filename := filepath.Join(target, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw, err := pgzip.NewWriterLevel(fw, p.config.CompressionLevel) - - if err != nil { - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw := lz4.NewWriter(fw) - if err != nil { - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - if p.config.CompressionLevel > gzip.DefaultCompression { - cw.Header.HighCompression = true - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpZIP(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - defer fw.Close() - - zw := zip.NewWriter(fw) - defer zw.Close() - - for _, name := range src { - header, err := zw.Create(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - fr, err := os.Open(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - if _, err = io.Copy(header, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("zip error: %s", err) - } - fr.Close() - } - return []string{dst}, nil - + return nil } From e81378ac393af8e08316d6d6446357eed37248b5 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 04:46:39 -0700 Subject: [PATCH 478/956] Fix test case --- post-processor/compress/post-processor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index f60a7846a..f6e82b1c9 100644 --- a/post-processor/compress/post-processor_test.go +++ 
b/post-processor/compress/post-processor_test.go @@ -139,7 +139,7 @@ func TestZipArchive(t *testing.T) { defer artifact.Destroy() } - tpl, err := template.Parse(strings.NewReader(tarTestCase)) + tpl, err := template.Parse(strings.NewReader(zipTestCase)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } From 9cd572461d1ed7ca8f2bc3d80b10f18b221b1965 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 05:13:48 -0700 Subject: [PATCH 479/956] Updated docs, fix artifact bug --- post-processor/compress/artifact.go | 2 +- .../post-processors/compress.html.markdown | 25 +++++++++++++------ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index cfc914a55..56a5ce402 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -29,7 +29,7 @@ func (*Artifact) Id() string { } func (a *Artifact) Files() []string { - return a.files + return []string{a.Path} } func (a *Artifact) String() string { diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index c5a05a937..8fcd81ee3 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -2,7 +2,7 @@ layout: "docs" page_title: "compress Post-Processor" description: |- - The Packer compress post-processor takes an artifact with files (such as from VMware or VirtualBox) and gzip compresses the artifact into a single archive. + The Packer compress post-processor takes an artifact with files (such as from VMware or VirtualBox) and compresses the artifact into a single archive. --- # Compress Post-Processor @@ -14,22 +14,33 @@ VMware or VirtualBox) and compresses the artifact into a single archive. ## Configuration +### Required: + You must specify the output filename. The archive format is derived from the filename. -* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file. If the extension can't be detected packer defaults to `.tar.gz` behavior but will not change the filename. +* `output` (string) - The path to save the compressed archive. The archive + format is inferred from the filename. E.g. `.tar.gz` will be a gzipped + tarball. `.zip` will be a zip file. If the extension can't be detected packer + defaults to `.tar.gz` behavior but will not change the filename. - If you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. + If you are executing multiple builders in parallel you should make sure + `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. + +### Optional: If you want more control over how the archive is created you can specify the following settings: -* `compression_level` (optional, integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Default if omitted is 6 -* `keep_input_artifact` (optional, bool) - Keep source files; defaults to false +* `compression_level` (integer) - Specify the compression level, for algorithms + that support it, from 1 through 9 inclusive. Typically higher compression + levels take longer but produce smaller files. 
Defaults to `6` -## Supported Formats +* `keep_input_artifact` (bool) - Keep source files; defaults to `false` + +### Supported Formats Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to compress. -## Example +## Examples Some minimal examples are shown below, showing only the post-processor configuration: From 301bd8ceb27ccf8650ecaa31f1f22aab9725f503 Mon Sep 17 00:00:00 2001 From: Joel Scoble Date: Thu, 18 Jun 2015 14:12:28 -0500 Subject: [PATCH 480/956] update the documented datatype for facter to 'object of key/value strings' for both puppet provisioners --- .../source/docs/provisioners/puppet-masterless.html.markdown | 2 +- website/source/docs/provisioners/puppet-server.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 08da2c20e..6ba570add 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -50,7 +50,7 @@ Optional parameters: various [configuration template variables](/docs/templates/configuration-templates.html) available. See below for more information. -* `facter` (object, string keys and values) - Additional +* `facter` (object of key/value strings) - Additional [facts](http://puppetlabs.com/puppet/related-projects/facter) to make available when Puppet is running. diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown index f60e88f50..ecc33f36c 100644 --- a/website/source/docs/provisioners/puppet-server.html.markdown +++ b/website/source/docs/provisioners/puppet-server.html.markdown @@ -48,7 +48,7 @@ required. They are listed below: the node on your disk. This defaults to nothing, in which case a client private key won't be uploaded. -* `facter` (hash) - Additional Facter facts to make available to the +* `facter` (object of key/value strings) - Additional Facter facts to make available to the Puppet run. 
* `options` (string) - Additional command line options to pass From 0880d448f06fca9338d26b35a1c183bf1fe13d76 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 14:01:00 -0700 Subject: [PATCH 481/956] Cleanup some debug code and reorganize config struct --- post-processor/compress/post-processor.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 0ecc7db86..769f21fb2 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -21,13 +21,17 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` - OutputPath string `mapstructure:"output"` - CompressionLevel int `mapstructure:"compression_level"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - Archive string - Algorithm string - UsingDefault bool - ctx *interpolate.Context + + // Fields from config file + OutputPath string `mapstructure:"output"` + CompressionLevel int `mapstructure:"compression_level"` + KeepInputArtifact bool `mapstructure:"keep_input_artifact"` + + // Derived fields + Archive string + Algorithm string + + ctx *interpolate.Context } type PostProcessor struct { @@ -54,8 +58,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { }, }, raws...) - fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel) - errs := new(packer.MultiError) if p.config.OutputPath == "" { @@ -81,7 +83,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.CompressionLevel = pgzip.DefaultCompression } - fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel) for key, ptr := range templates { if *ptr == "" { errs = packer.MultiErrorAppend( From 7497db67b4d2c652759024e5133501c3cbd0c6b0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 14:22:26 -0700 Subject: [PATCH 482/956] Tweaked some of the UI/UX around GOMAXPROCS --- post-processor/compress/post-processor.go | 24 +++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 769f21fb2..1a4b97595 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -60,6 +60,11 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { errs := new(packer.MultiError) + // If there is no explicit number of Go threads to use, then set it + if os.Getenv("GOMAXPROCS") == "" { + runtime.GOMAXPROCS(runtime.NumCPU()) + } + if p.config.OutputPath == "" { p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" } @@ -124,11 +129,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac var output io.WriteCloser switch p.config.Algorithm { case "lz4": - ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target)) + ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s", + runtime.GOMAXPROCS(-1), target)) output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel) defer output.Close() case "pgzip": - ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target)) + ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s", + runtime.GOMAXPROCS(-1), target)) output, err = makePgzipWriter(outputFile, p.config.CompressionLevel) defer output.Close() default: @@ -137,13 +144,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac compression := p.config.Algorithm if 
compression == "" { - compression = "no" + compression = "no compression" } // Build an archive, if we're supposed to do that. switch p.config.Archive { case "tar": - ui.Say(fmt.Sprintf("Tarring %s with %s compression", target, compression)) + ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression)) err = createTarArchive(artifact.Files(), output) if err != nil { return nil, keep, fmt.Errorf("Error creating tar: %s", err) @@ -155,7 +162,6 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, keep, fmt.Errorf("Error creating zip: %s", err) } default: - ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression)) // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { @@ -163,18 +169,20 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac "Can only have 1 input file when not using tar/zip. Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } + archiveFile := artifact.Files()[0] + ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression)) - source, err := os.Open(artifact.Files()[0]) + source, err := os.Open(archiveFile) if err != nil { return nil, keep, fmt.Errorf( "Failed to open source file %s for reading: %s", - artifact.Files()[0], err) + archiveFile, err) } defer source.Close() if _, err = io.Copy(output, source); err != nil { return nil, keep, fmt.Errorf("Failed to compress %s: %s", - artifact.Files()[0], err) + archiveFile, err) } } From d02f6644d2407a64c9a6cb3b519922363c45bc96 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 18:21:03 -0700 Subject: [PATCH 483/956] Refactored test so it's more DRY and also rearranged things so the test cases and configs are at the top of the file --- .../compress/post-processor_test.go | 291 +++++++----------- 1 file changed, 110 insertions(+), 181 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index f6e82b1c9..31525adf7 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -12,39 +12,6 @@ import ( "github.com/mitchellh/packer/template" ) -func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { - // Create fake UI and Cache - ui := packer.TestUi(t) - cache := &packer.FileCache{CacheDir: os.TempDir()} - - // Create config for file builder - const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` - tpl, err := template.Parse(strings.NewReader(fileConfig)) - if err != nil { - return nil, nil, fmt.Errorf("Unable to parse setup configuration: %s", err) - } - - // Prepare the file builder - builder := file.Builder{} - warnings, err := builder.Prepare(tpl.Builders["file"].Config) - if len(warnings) > 0 { - for _, warn := range warnings { - return nil, nil, fmt.Errorf("Configuration warning: %s", warn) - } - } - if err != nil { - return nil, nil, fmt.Errorf("Invalid configuration: %s", err) - } - - // Run the file builder - artifact, err := builder.Run(ui, nil, cache) - if err != nil { - return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) - } - - return ui, artifact, err -} - func TestDetectFilename(t *testing.T) { // Test default / fallback with no file extension nakedFilename := Config{OutputPath: "test"} @@ -87,154 +54,6 @@ func TestDetectFilename(t *testing.T) { } } -func TestSimpleCompress(t *testing.T) { - if os.Getenv(env.TestEnvVar) == "" { - 
t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) - } - - ui, artifact, err := setup(t) - if err != nil { - t.Fatalf("Error bootstrapping test: %s", err) - } - if artifact != nil { - defer artifact.Destroy() - } - - tpl, err := template.Parse(strings.NewReader(simpleTestCase)) - if err != nil { - t.Fatalf("Unable to parse test config: %s", err) - } - - compressor := PostProcessor{} - compressor.Configure(tpl.PostProcessors[0][0].Config) - artifactOut, _, err := compressor.PostProcess(ui, artifact) - if err != nil { - t.Fatalf("Failed to compress artifact: %s", err) - } - // Cleanup after the test completes - defer artifactOut.Destroy() - - // Verify things look good - fi, err := os.Stat("package.tar.gz") - if err != nil { - t.Errorf("Unable to read archive: %s", err) - } - if fi.IsDir() { - t.Error("Archive should not be a directory") - } -} - -func TestZipArchive(t *testing.T) { - if os.Getenv(env.TestEnvVar) == "" { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) - } - - ui, artifact, err := setup(t) - if err != nil { - t.Fatalf("Error bootstrapping test: %s", err) - } - if artifact != nil { - defer artifact.Destroy() - } - - tpl, err := template.Parse(strings.NewReader(zipTestCase)) - if err != nil { - t.Fatalf("Unable to parse test config: %s", err) - } - - compressor := PostProcessor{} - compressor.Configure(tpl.PostProcessors[0][0].Config) - artifactOut, _, err := compressor.PostProcess(ui, artifact) - if err != nil { - t.Fatalf("Failed to archive artifact: %s", err) - } - // Cleanup after the test completes - defer artifactOut.Destroy() - - // Verify things look good - _, err = os.Stat("package.zip") - if err != nil { - t.Errorf("Unable to read archive: %s", err) - } -} - -func TestTarArchive(t *testing.T) { - if os.Getenv(env.TestEnvVar) == "" { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) - } - - ui, artifact, err := setup(t) - if err != nil { - t.Fatalf("Error bootstrapping test: %s", err) - } - if artifact != nil { - defer artifact.Destroy() - } - - tpl, err := template.Parse(strings.NewReader(tarTestCase)) - if err != nil { - t.Fatalf("Unable to parse test config: %s", err) - } - - compressor := PostProcessor{} - compressor.Configure(tpl.PostProcessors[0][0].Config) - artifactOut, _, err := compressor.PostProcess(ui, artifact) - if err != nil { - t.Fatalf("Failed to archive artifact: %s", err) - } - // Cleanup after the test completes - defer artifactOut.Destroy() - - // Verify things look good - _, err = os.Stat("package.tar") - if err != nil { - t.Errorf("Unable to read archive: %s", err) - } -} - -func TestCompressOptions(t *testing.T) { - if os.Getenv(env.TestEnvVar) == "" { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) - } - - ui, artifact, err := setup(t) - if err != nil { - t.Fatalf("Error bootstrapping test: %s", err) - } - if artifact != nil { - defer artifact.Destroy() - } - - tpl, err := template.Parse(strings.NewReader(optionsTestCase)) - if err != nil { - t.Fatalf("Unable to parse test config: %s", err) - } - - compressor := PostProcessor{} - compressor.Configure(tpl.PostProcessors[0][0].Config) - - if compressor.config.CompressionLevel != 9 { - t.Errorf("Expected compression_level 9, got %d", compressor.config.CompressionLevel) - } - - artifactOut, _, err := compressor.PostProcess(ui, artifact) - if err != nil { - t.Fatalf("Failed to archive artifact: %s", err) - } - // Cleanup after the test completes 
- defer artifactOut.Destroy() - - // Verify things look good - _, err = os.Stat("package.gz") - if err != nil { - t.Errorf("Unable to read archive: %s", err) - } -} - const simpleTestCase = ` { "post-processors": [ @@ -246,6 +65,19 @@ const simpleTestCase = ` } ` +func TestSimpleCompress(t *testing.T) { + artifact := testArchive(t, simpleTestCase) + defer artifact.Destroy() + + fi, err := os.Stat("package.tar.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } + if fi.IsDir() { + t.Error("Archive should not be a directory") + } +} + const zipTestCase = ` { "post-processors": [ @@ -257,6 +89,17 @@ const zipTestCase = ` } ` +func TestZipArchive(t *testing.T) { + artifact := testArchive(t, zipTestCase) + defer artifact.Destroy() + + // Verify things look good + _, err := os.Stat("package.zip") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + const tarTestCase = ` { "post-processors": [ @@ -268,6 +111,17 @@ const tarTestCase = ` } ` +func TestTarArchive(t *testing.T) { + artifact := testArchive(t, tarTestCase) + defer artifact.Destroy() + + // Verify things look good + _, err := os.Stat("package.tar") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + const optionsTestCase = ` { "post-processors": [ @@ -279,3 +133,78 @@ const optionsTestCase = ` ] } ` + +func TestCompressOptions(t *testing.T) { + artifact := testArchive(t, optionsTestCase) + defer artifact.Destroy() + + // Verify things look good + _, err := os.Stat("package.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } +} + +// Test Helpers + +func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { + // Create fake UI and Cache + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + // Create config for file builder + const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` + tpl, err := template.Parse(strings.NewReader(fileConfig)) + if err != nil { + return nil, nil, fmt.Errorf("Unable to parse setup configuration: %s", err) + } + + // Prepare the file builder + builder := file.Builder{} + warnings, err := builder.Prepare(tpl.Builders["file"].Config) + if len(warnings) > 0 { + for _, warn := range warnings { + return nil, nil, fmt.Errorf("Configuration warning: %s", warn) + } + } + if err != nil { + return nil, nil, fmt.Errorf("Invalid configuration: %s", err) + } + + // Run the file builder + artifact, err := builder.Run(ui, nil, cache) + if err != nil { + return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) + } + + return ui, artifact, err +} + +func testArchive(t *testing.T, config string) packer.Artifact { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(config)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to compress artifact: %s", err) + } + + return artifactOut +} From 801e5aaa30ccb73fcf4ebee49b4c519aba610e16 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 18:25:46 -0700 Subject: [PATCH 484/956] Move test configs into the test func --- 
.../compress/post-processor_test.go | 97 +++++++++---------- 1 file changed, 48 insertions(+), 49 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 31525adf7..a4812723f 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -54,19 +54,18 @@ func TestDetectFilename(t *testing.T) { } } -const simpleTestCase = ` -{ - "post-processors": [ - { - "type": "compress", - "output": "package.tar.gz" - } - ] -} -` - func TestSimpleCompress(t *testing.T) { - artifact := testArchive(t, simpleTestCase) + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "package.tar.gz" + } + ] + } + ` + artifact := testArchive(t, config) defer artifact.Destroy() fi, err := os.Stat("package.tar.gz") @@ -78,19 +77,19 @@ func TestSimpleCompress(t *testing.T) { } } -const zipTestCase = ` -{ - "post-processors": [ - { - "type": "compress", - "output": "package.zip" - } - ] -} -` - func TestZipArchive(t *testing.T) { - artifact := testArchive(t, zipTestCase) + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "package.zip" + } + ] + } + ` + + artifact := testArchive(t, config) defer artifact.Destroy() // Verify things look good @@ -100,19 +99,19 @@ func TestZipArchive(t *testing.T) { } } -const tarTestCase = ` -{ - "post-processors": [ - { - "type": "compress", - "output": "package.tar" - } - ] -} -` - func TestTarArchive(t *testing.T) { - artifact := testArchive(t, tarTestCase) + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "package.tar" + } + ] + } + ` + + artifact := testArchive(t, config) defer artifact.Destroy() // Verify things look good @@ -122,20 +121,20 @@ func TestTarArchive(t *testing.T) { } } -const optionsTestCase = ` -{ - "post-processors": [ - { - "type": "compress", - "output": "package.gz", - "compression_level": 9 - } - ] -} -` - func TestCompressOptions(t *testing.T) { - artifact := testArchive(t, optionsTestCase) + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "package.gz", + "compression_level": 9 + } + ] + } + ` + + artifact := testArchive(t, config) defer artifact.Destroy() // Verify things look good From 2d92fd8733ca73487ed5fe9c08d1016d3f25cc9b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 18 Jun 2015 19:08:13 -0700 Subject: [PATCH 485/956] Added test case for gzip that expands the data and compares to what we put in --- post-processor/compress/post-processor_test.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index a4812723f..d7bca6c7a 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,7 +1,9 @@ package compress import ( + "compress/gzip" "fmt" + "io/ioutil" "os" "strings" "testing" @@ -54,6 +56,8 @@ func TestDetectFilename(t *testing.T) { } } +const expectedFileContents = "Hello world!" 
+ func TestSimpleCompress(t *testing.T) { const config = ` { @@ -137,10 +141,13 @@ func TestCompressOptions(t *testing.T) { artifact := testArchive(t, config) defer artifact.Destroy() - // Verify things look good - _, err := os.Stat("package.gz") - if err != nil { - t.Errorf("Unable to read archive: %s", err) + filename := "package.gz" + archive, _ := os.Open(filename) + gzipReader, _ := gzip.NewReader(archive) + data, _ := ioutil.ReadAll(gzipReader) + + if string(data) != expectedFileContents { + t.Errorf("Expected:\n%s\nFound:\n%s\n", expectedFileContents, data) } } From 1b691cbb5d7ca0b2e61bef5049aee3baf29e2741 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:06:30 +0200 Subject: [PATCH 486/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 985c4d4fa..b89c01f8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -144,6 +144,7 @@ BUG FIXES: * post-processor/atlas: Fix index out of range panic [GH-1959] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] + * post-processor/vagrant-cloud: Retry failed uploads a few times * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] * provisioner/chef-client: Node cleanup works now. [GH-2257] * provisioner/puppet-masterless: Allow manifest_file to be a directory From c3b10ba10997adb4ce2206177275c3391dbc646e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:07:36 +0200 Subject: [PATCH 487/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b89c01f8d..66c611975 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -153,6 +153,8 @@ BUG FIXES: * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] * provisioner/shell: single quotes in env vars are escaped [GH-2229] * provisioner/shell: Temporary file is deleted after run [GH-2259] + * provisioner/shell: Randomize default script name to avoid strange + race issues from Windows. [GH-2270] ## 0.7.5 (December 9, 2014) From ed4acbc9033f40772bad07edc22e6f6ecdb72027 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:09:22 +0200 Subject: [PATCH 488/956] update CHANGELOG --- CHANGELOG.md | 1 + website/source/docs/provisioners/puppet-server.html.markdown | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66c611975..70a976086 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ IMPROVEMENTS: * provisioner/puppet-masterless: `working_directory` option [GH-1831] * provisioner/puppet-masterless: `packer_build_name` and `packer_build_type` are default facts. [GH-1878] + * provisioner/puppet-server: `ignore_exit_codes` option added [GH-2280] BUG FIXES: diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown index ecc33f36c..1cbc9c2d4 100644 --- a/website/source/docs/provisioners/puppet-server.html.markdown +++ b/website/source/docs/provisioners/puppet-server.html.markdown @@ -51,6 +51,9 @@ required. They are listed below: * `facter` (object of key/value strings) - Additional Facter facts to make available to the Puppet run. +* `ignore_exit_codes` (bool) - If true, Packer will never consider the + provisioner a failure. + * `options` (string) - Additional command line options to pass to `puppet agent` when Puppet is ran. 
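Aside: the new `ignore_exit_codes` option documented above boils down to the exit-status check sketched below. The type and field names here are illustrative stand-ins, not the provisioner's actual internals:

package main

import "fmt"

// config stands in for the provisioner configuration discussed above;
// the field name is assumed for illustration.
type config struct {
	IgnoreExitCodes bool
}

// checkExit treats any non-zero exit status as a failure unless
// ignore_exit_codes was set in the template.
func checkExit(c config, exitStatus int) error {
	if exitStatus != 0 && !c.IgnoreExitCodes {
		return fmt.Errorf("puppet exited with a non-zero exit status: %d", exitStatus)
	}
	return nil
}

func main() {
	fmt.Println(checkExit(config{IgnoreExitCodes: true}, 2)) // <nil>
	fmt.Println(checkExit(config{}, 2))                      // error
}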
From 59766067c0725b1bbcab9f5b3ac10648f657bc10 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:13:51 +0200 Subject: [PATCH 489/956] provisioner/windows-restart: make test pass on slow machines --- provisioner/windows-restart/provisioner_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go index bbb89e116..247452c22 100644 --- a/provisioner/windows-restart/provisioner_test.go +++ b/provisioner/windows-restart/provisioner_test.go @@ -4,9 +4,10 @@ import ( "bytes" "errors" "fmt" - "github.com/mitchellh/packer/packer" "testing" "time" + + "github.com/mitchellh/packer/packer" ) func testConfig() map[string]interface{} { @@ -256,7 +257,7 @@ func TestProvision_waitForCommunicatorWithCancel(t *testing.T) { comm := new(packer.MockCommunicator) p.comm = comm p.ui = ui - retryableSleep = 10 * time.Millisecond + retryableSleep = 5 * time.Second p.cancel = make(chan struct{}) var err error @@ -273,6 +274,7 @@ func TestProvision_waitForCommunicatorWithCancel(t *testing.T) { }() go func() { + time.Sleep(10 * time.Millisecond) p.Cancel() waitDone <- true }() From 5c290fdbc710f260926f550bd60a916eb83eae67 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:15:16 +0200 Subject: [PATCH 490/956] builder/amazon use proper key pair name --- builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index c13947ac4..ecf972362 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -90,7 +90,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, TemporaryKeyPairName: b.config.TemporaryKeyPairName, PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 5e8068718..46862e183 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -179,7 +179,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, TemporaryKeyPairName: b.config.TemporaryKeyPairName, }, From 9c0f37b2c99cca00fef568ad5af79796681ea654 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 06:21:20 +0200 Subject: [PATCH 491/956] builder/amazon: fix weird edge cases with key names [GH-2281] --- builder/amazon/common/step_key_pair.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 5bed27b10..6e7f2bb2b 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -23,17 +23,14 @@ type StepKeyPair struct { func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - if s.KeyPairName != "" { - s.keyName = s.KeyPairName // need to get from config - } - 
		privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
 		if err != nil {
-			state.Put("error", fmt.Errorf("Error loading configured private key file: %s", err))
+			state.Put("error", fmt.Errorf(
+				"Error loading configured private key file: %s", err))
 			return multistep.ActionHalt
 		}
 
-		state.Put("keyPair", s.keyName)
+		state.Put("keyPair", s.KeyPairName)
 		state.Put("privateKey", string(privateKeyBytes))
 
 		return multistep.ActionContinue
@@ -43,7 +40,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)
 
 	ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName))
-	keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: &s.KeyPairName})
+	keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{
+		KeyName: &s.TemporaryKeyPairName})
 	if err != nil {
 		state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
 		return multistep.ActionHalt

From f41429b6b43c0740732e32805f4f7fd91d08e46e Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 19 Jun 2015 06:24:29 +0200
Subject: [PATCH 492/956] update CHANGELOG

/cc @cbednarski - I try to do this after every PR just so I don't
forget, not a bad practice to get into. This is perhaps arguably an
"improvement" but I think due to the magnitude of code change I think
it's worth it in the features section. :)
---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70a976086..25f52920d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,9 @@ FEATURES:
     batch files. [GH-2243]
   * **New provisioner: windows-restart**: Restart a Windows machines and
     wait for it to come back online. [GH-2243]
+  * **Compress post-processor supports multiple algorithms:** The compress
+    post-processor now supports lz4 compression and compresses gzip in
+    parallel for much faster throughput.
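Aside: the parallel gzip referenced in this CHANGELOG entry is klauspost's pgzip package. A minimal sketch of the pattern the post-processor adopts follows; the wrapper function is assumed for illustration, but the pgzip calls match the ones in patch 477 above:

package main

import (
	"fmt"
	"io"
	"os"
	"runtime"

	"github.com/klauspost/pgzip"
)

// compress writes src to dst as gzip, compressing independent 500 KB blocks
// on every available core.
func compress(dst io.Writer, src io.Reader, level int) error {
	w, err := pgzip.NewWriterLevel(dst, level)
	if err != nil {
		return err
	}
	defer w.Close()
	w.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
	if _, err := io.Copy(w, src); err != nil {
		return err
	}
	return nil
}

func main() {
	if err := compress(os.Stdout, os.Stdin, pgzip.DefaultCompression); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}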
IMPROVEMENTS: From c9714ce69e5a68ef673b2455909cfee4070cc4e3 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Thu, 18 Jun 2015 13:23:48 -0500 Subject: [PATCH 493/956] builder/amazon-ebs: Clean up orphan volumes Fixes #1783 --- builder/amazon/common/block_device.go | 2 +- builder/amazon/ebs/builder.go | 3 + builder/amazon/ebs/step_cleanup_volumes.go | 117 +++++++++++++++++++++ 3 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 builder/amazon/ebs/step_cleanup_volumes.go diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index e97cd4107..a01dfc83d 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -42,7 +42,7 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { // You cannot specify Encrypted if you specify a Snapshot ID if blockDevice.SnapshotId != "" { ebsBlockDevice.SnapshotID = &blockDevice.SnapshotId - } else { + } else if blockDevice.Encrypted { ebsBlockDevice.Encrypted = &blockDevice.Encrypted } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index c13947ac4..35dc98a6d 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -99,6 +99,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe CommConfig: &b.config.RunConfig.Comm, VpcId: b.config.VpcId, }, + &stepCleanupVolumes{ + BlockDevices: b.config.BlockDevices, + }, &awscommon.StepRunSourceInstance{ Debug: b.config.PackerDebug, ExpectedRootDevice: "ebs", diff --git a/builder/amazon/ebs/step_cleanup_volumes.go b/builder/amazon/ebs/step_cleanup_volumes.go new file mode 100644 index 000000000..c66f9d786 --- /dev/null +++ b/builder/amazon/ebs/step_cleanup_volumes.go @@ -0,0 +1,117 @@ +package ebs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/builder/amazon/common" + "github.com/mitchellh/packer/packer" +) + +// stepCleanupVolumes cleans up any orphaned volumes that were not designated to +// remain after termination of the instance. These volumes are typically ones +// that are marked as "delete on terminate:false" in the source_ami of a build. 
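+// Devices the user marked delete_on_termination: false in either the AMI or
+// launch block device mappings are kept; any other orphaned volume that is
+// still available after the instance terminates is deleted in Cleanup below.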
+type stepCleanupVolumes struct {
+	BlockDevices common.BlockDevices
+}
+
+func (s *stepCleanupVolumes) Run(state multistep.StateBag) multistep.StepAction {
+	// stepCleanupVolumes is for Cleanup only
+	return multistep.ActionContinue
+}
+
+func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) {
+	ec2conn := state.Get("ec2").(*ec2.EC2)
+	instanceRaw := state.Get("instance")
+	var instance *ec2.Instance
+	if instanceRaw != nil {
+		instance = instanceRaw.(*ec2.Instance)
+	}
+	ui := state.Get("ui").(packer.Ui)
+	amisRaw := state.Get("amis")
+	if amisRaw == nil {
+		ui.Say("No AMIs to cleanup")
+		return
+	}
+
+	if instance == nil {
+		ui.Say("No volumes to clean up, skipping")
+		return
+	}
+
+	ui.Say("Cleaning up any extra volumes...")
+
+	save := make(map[string]bool)
+	for _, b := range s.BlockDevices.AMIMappings {
+		if !b.DeleteOnTermination {
+			save[b.DeviceName] = true
+		}
+	}
+
+	for _, b := range s.BlockDevices.LaunchMappings {
+		if !b.DeleteOnTermination {
+			save[b.DeviceName] = true
+		}
+	}
+
+	// Collect Volume information from the cached Instance as a map of volume-id
+	// to device name, to compare with save list above
+	var vl []*string
+	volList := make(map[string]string)
+	for _, bdm := range instance.BlockDeviceMappings {
+		if bdm.EBS != nil {
+			vl = append(vl, bdm.EBS.VolumeID)
+			volList[*bdm.EBS.VolumeID] = *bdm.DeviceName
+		}
+	}
+
+	// Using the volume list from the cached Instance, check with AWS for up to
+	// date information on them
+	resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{
+		Filters: []*ec2.Filter{
+			&ec2.Filter{
+				Name:   aws.String("volume-id"),
+				Values: vl,
+			},
+		},
+	})
+
+	if err != nil {
+		ui.Say(fmt.Sprintf("Error describing volumes: %s", err))
+		return
+	}
+
+	// If any of the returned volumes are in a "deleting" stage or otherwise not
+	// available, remove them from the list of volumes
+	for _, v := range resp.Volumes {
+		if v.State != nil && *v.State != "available" {
+			delete(volList, *v.VolumeID)
+		}
+	}
+
+	if len(resp.Volumes) == 0 {
+		ui.Say("No volumes to clean up, skipping")
+		return
+	}
+
+	// Filter out any devices marked for saving
+	for saveName, _ := range save {
+		for volKey, volName := range volList {
+			if volName == saveName {
+				delete(volList, volKey)
+			}
+		}
+	}
+
+	// Destroy remaining volumes
+	for k, _ := range volList {
+		ui.Say(fmt.Sprintf("Destroying volume (%s)...", k))
+		_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeID: aws.String(k)})
+		if err != nil {
+			ui.Say(fmt.Sprintf("Error deleting volume: %s", k))
+		}
+
+	}
+}

From 7165156b646fc159ba2a62175e82ea652f4c1e37 Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Fri, 19 Jun 2015 15:56:05 -0500
Subject: [PATCH 494/956] add note on EBS cleanup behavior

---
 website/.ruby-version                                 | 1 +
 website/source/docs/builders/amazon-ebs.html.markdown | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 website/.ruby-version

diff --git a/website/.ruby-version b/website/.ruby-version
new file mode 100644
index 000000000..b1b25a5ff
--- /dev/null
+++ b/website/.ruby-version
@@ -0,0 +1 @@
+2.2.2
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index 5420c10fd..4fc48c441 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -278,6 +278,13 @@ Here is an example using the optional AMI tags. This will add the tags
 }
 ```
 
+-> **Note:** Packer uses pre-built AMIs as the source for building images.
+These source AMIs may include volumes that are not flagged to be destroyed on
+termination of the instance building the new image. Packer will attempt to clean
+up all residual volumes that are not designated by the user to remain after
+termination. If you need to preserve those source volumes, you can override the
+termination setting by specifying `delete_on_termination=false` in the
+`launch_block_device_mappings` block for the device.

 [1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
 [2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html

From 80fc1f032b29d227d1fbd836cc4c0f1ac4ba8292 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 19 Jun 2015 15:06:06 -0700
Subject: [PATCH 495/956] provisioner/shell-local: a first stab

---
 provisioner/shell-local/communicator.go     |  81 +++++++++++++++
 provisioner/shell-local/provisioner.go      | 109 ++++++++++++++++++++
 provisioner/shell-local/provisioner_test.go |  67 ++++++++++++
 3 files changed, 257 insertions(+)
 create mode 100644 provisioner/shell-local/communicator.go
 create mode 100644 provisioner/shell-local/provisioner.go
 create mode 100644 provisioner/shell-local/provisioner_test.go

diff --git a/provisioner/shell-local/communicator.go b/provisioner/shell-local/communicator.go
new file mode 100644
index 000000000..5cf3cd980
--- /dev/null
+++ b/provisioner/shell-local/communicator.go
@@ -0,0 +1,81 @@
+package shell
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"syscall"
+
+	"github.com/mitchellh/packer/packer"
+	"github.com/mitchellh/packer/template/interpolate"
+)
+
+type Communicator struct {
+	ExecuteCommand []string
+	Ctx            interpolate.Context
+}
+
+func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
+	// Render the template so that we know how to execute the command
+	c.Ctx.Data = &ExecuteCommandTemplate{
+		Command: cmd.Command,
+	}
+	for i, field := range c.ExecuteCommand {
+		command, err := interpolate.Render(field, &c.Ctx)
+		if err != nil {
+			return fmt.Errorf("Error processing command: %s", err)
+		}
+
+		c.ExecuteCommand[i] = command
+	}
+
+	// Build the local command to execute
+	localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...)
+	localCmd.Stdin = cmd.Stdin
+	localCmd.Stdout = cmd.Stdout
+	localCmd.Stderr = cmd.Stderr
+
+	// Start it. If it doesn't work, then error right away.
+	if err := localCmd.Start(); err != nil {
+		return err
+	}
+
+	// We've started successfully. Start a goroutine to wait for
+	// it to complete and track exit status.
+	go func() {
+		var exitStatus int
+		err := localCmd.Wait()
+		if err != nil {
+			if exitErr, ok := err.(*exec.ExitError); ok {
+				exitStatus = 1
+
+				// There is no process-independent way to get the REAL
+				// exit status so we just try to go deeper.
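+				// On Unix-like systems Sys() returns a syscall.WaitStatus
+				// carrying the real exit code; when the assertion below
+				// does not hold, the generic exitStatus of 1 set above is kept.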
+				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
+					exitStatus = status.ExitStatus()
+				}
+			}
+		}
+
+		cmd.SetExited(exitStatus)
+	}()
+
+	return nil
+}
+
+func (c *Communicator) Upload(string, io.Reader, *os.FileInfo) error {
+	return fmt.Errorf("upload not supported")
+}
+
+func (c *Communicator) UploadDir(string, string, []string) error {
+	return fmt.Errorf("uploadDir not supported")
+}
+
+func (c *Communicator) Download(string, io.Writer) error {
+	return fmt.Errorf("download not supported")
+}
+
+type ExecuteCommandTemplate struct {
+	Command string
+}
diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go
new file mode 100644
index 000000000..499be7f1d
--- /dev/null
+++ b/provisioner/shell-local/provisioner.go
@@ -0,0 +1,109 @@
+package shell
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+
+	"github.com/mitchellh/packer/common"
+	"github.com/mitchellh/packer/helper/config"
+	"github.com/mitchellh/packer/packer"
+	"github.com/mitchellh/packer/template/interpolate"
+)
+
+type Config struct {
+	common.PackerConfig `mapstructure:",squash"`
+
+	// Command is the command to execute
+	Command string
+
+	// ExecuteCommand is the command used to execute the command.
+	ExecuteCommand []string `mapstructure:"execute_command"`
+
+	ctx interpolate.Context
+}
+
+type Provisioner struct {
+	config Config
+}
+
+func (p *Provisioner) Prepare(raws ...interface{}) error {
+	err := config.Decode(&p.config, &config.DecodeOpts{
+		Interpolate: true,
+		InterpolateFilter: &interpolate.RenderFilter{
+			Exclude: []string{
+				"execute_command",
+			},
+		},
+	}, raws...)
+	if err != nil {
+		return err
+	}
+
+	if len(p.config.ExecuteCommand) == 0 {
+		if runtime.GOOS == "windows" {
+			p.config.ExecuteCommand = []string{
+				"cmd",
+				"/C",
+				"{{.Command}}",
+			}
+		} else {
+			p.config.ExecuteCommand = []string{
+				"/bin/sh",
+				"-c",
+				"{{.Command}}",
+			}
+		}
+	}
+
+	var errs *packer.MultiError
+	if p.config.Command == "" {
+		errs = packer.MultiErrorAppend(errs,
+			errors.New("command must be specified"))
+	}
+
+	if len(p.config.ExecuteCommand) == 0 {
+		errs = packer.MultiErrorAppend(errs,
+			errors.New("execute_command must not be empty"))
+	}
+
+	if errs != nil && len(errs.Errors) > 0 {
+		return errs
+	}
+
+	return nil
+}
+
+func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error {
+	// Make another communicator for local
+	comm := &Communicator{
+		Ctx:            p.config.ctx,
+		ExecuteCommand: p.config.ExecuteCommand,
+	}
+
+	// Build the remote command
+	cmd := &packer.RemoteCmd{Command: p.config.Command}
+
+	ui.Say(fmt.Sprintf(
+		"Executing local command: %s",
+		p.config.Command))
+	if err := cmd.StartWithUi(comm, ui); err != nil {
+		return fmt.Errorf(
+			"Error executing command: %s\n\n"+
+				"Please see output above for more information.",
+			p.config.Command)
+	}
+	if cmd.ExitStatus != 0 {
+		return fmt.Errorf(
+			"Erroneous exit code %d while executing command: %s\n\n"+
+				"Please see output above for more information.",
+			cmd.ExitStatus,
+			p.config.Command)
+	}
+
+	return nil
+}
+
+func (p *Provisioner) Cancel() {
+	// Just do nothing.
When the process ends, so will our provisioner +} diff --git a/provisioner/shell-local/provisioner_test.go b/provisioner/shell-local/provisioner_test.go new file mode 100644 index 000000000..ad8f3065d --- /dev/null +++ b/provisioner/shell-local/provisioner_test.go @@ -0,0 +1,67 @@ +package shell + +import ( + "testing" + + "github.com/mitchellh/packer/packer" +) + +func TestProvisioner_impl(t *testing.T) { + var _ packer.Provisioner = new(Provisioner) +} + +func TestConfigPrepare(t *testing.T) { + cases := []struct { + Key string + Value interface{} + Err bool + }{ + { + "unknown_key", + "bad", + true, + }, + + { + "command", + nil, + true, + }, + } + + for _, tc := range cases { + raw := testConfig(t) + + if tc.Value == nil { + delete(raw, tc.Key) + } else { + raw[tc.Key] = tc.Value + } + + var p Provisioner + err := p.Prepare(raw) + if tc.Err { + testConfigErr(t, err, tc.Key) + } else { + testConfigOk(t, err) + } + } +} + +func testConfig(t *testing.T) map[string]interface{} { + return map[string]interface{}{ + "command": "echo foo", + } +} + +func testConfigErr(t *testing.T, err error, extra string) { + if err == nil { + t.Fatalf("should error: %s", extra) + } +} + +func testConfigOk(t *testing.T, err error) { + if err != nil { + t.Fatalf("bad: %s", err) + } +} From 2dc6155c30b243efc85067b8629bdd33da26952f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 15:27:44 -0700 Subject: [PATCH 496/956] provisioner/shell-local: tests --- provisioner/shell-local/communicator_test.go | 45 ++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 provisioner/shell-local/communicator_test.go diff --git a/provisioner/shell-local/communicator_test.go b/provisioner/shell-local/communicator_test.go new file mode 100644 index 000000000..90402324b --- /dev/null +++ b/provisioner/shell-local/communicator_test.go @@ -0,0 +1,45 @@ +package shell + +import ( + "bytes" + "runtime" + "strings" + "testing" + + "github.com/mitchellh/packer/packer" +) + +func TestCommunicator_impl(t *testing.T) { + var _ packer.Communicator = new(Communicator) +} + +func TestCommunicator(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("windows not supported for this test") + return + } + + c := &Communicator{ + ExecuteCommand: []string{"/bin/sh", "-c", "{{.Command}}"}, + } + + var buf bytes.Buffer + cmd := &packer.RemoteCmd{ + Command: "echo foo", + Stdout: &buf, + } + + if err := c.Start(cmd); err != nil { + t.Fatalf("err: %s", err) + } + + cmd.Wait() + + if cmd.ExitStatus != 0 { + t.Fatalf("err bad exit status: %d", cmd.ExitStatus) + } + + if strings.TrimSpace(buf.String()) != "foo" { + t.Fatalf("bad: %s", buf.String()) + } +} From d5c0f13d8f14d88e576818ce7217eaeb36e11fac Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 19 Jun 2015 15:31:17 -0700 Subject: [PATCH 497/956] website: document local shell --- .../docs/provisioners/shell-local.html.md | 45 +++++++++++++++++++ website/source/layouts/docs.erb | 3 +- 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 website/source/docs/provisioners/shell-local.html.md diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md new file mode 100644 index 000000000..b986cd5ef --- /dev/null +++ b/website/source/docs/provisioners/shell-local.html.md @@ -0,0 +1,45 @@ +--- +layout: "docs" +page_title: "Local Shell Provisioner" +description: |- + The shell Packer provisioner provisions machines built by Packer using shell scripts. 
Shell provisioning is the easiest way to get software installed and configured on a machine.
+---
+
+# Local Shell Provisioner
+
+Type: `shell-local`
+
+The local shell provisioner executes a local shell script on the machine
+running Packer. The [remote shell](/docs/provisioners/shell.html)
+provisioner executes shell scripts on a remote machine.
+
+## Basic Example
+
+The example below is fully functional.
+
+```javascript
+{
+  "type": "shell-local",
+  "command": "echo foo"
+}
+```
+
+## Configuration Reference
+
+The reference of available configuration options is listed below. The only
+required element is "command".
+
+Required:
+
+* `command` (string) - The command to execute. This will be executed
+  within the context of a shell as specified by `execute_command`.
+
+Optional parameters:
+
+* `execute_command` (array of strings) - The command to use to execute the script.
+  By default this is `["/bin/sh", "-c", "{{.Command}}"]`. The value is an array
+  of arguments executed directly by the OS. The value of this is
+  treated as a [configuration template](/docs/templates/configuration-templates.html).
+  The only available variable is `Command` which is the command to execute.

diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 8099b461a..b4101422c 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -47,7 +47,8 @@
       Provisioners

-      Shell Scripts
+      Remote Shell
+      Local Shell
       File Uploads
       PowerShell
       Windows Shell
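As an aside on the doc page added above: `execute_command` wraps `command` through the `{{.Command}}` template variable. A minimal sketch of overriding it in a template (illustrative only; the `/bin/bash` interpreter is an assumption, not part of the patch):

```javascript
{
  "type": "shell-local",
  "command": "echo foo",
  "execute_command": ["/bin/bash", "-c", "{{.Command}}"]
}
```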
From a0b6928dceb22395074ba80f4c1eb7df9bf359fc Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Fri, 19 Jun 2015 15:31:40 -0700
Subject: [PATCH 498/956] plugin: add the plugin

---
 plugin/provisioner-shell-local/main.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 plugin/provisioner-shell-local/main.go

diff --git a/plugin/provisioner-shell-local/main.go b/plugin/provisioner-shell-local/main.go
new file mode 100644
index 000000000..4f46a3ed3
--- /dev/null
+++ b/plugin/provisioner-shell-local/main.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+	"github.com/mitchellh/packer/packer/plugin"
+	"github.com/mitchellh/packer/provisioner/shell-local"
+)
+
+func main() {
+	server, err := plugin.Server()
+	if err != nil {
+		panic(err)
+	}
+	server.RegisterProvisioner(new(shell.Provisioner))
+	server.Serve()
+}

From 6ec428cc38165fbce18a826674baa379b70ab20c Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sun, 21 Jun 2015 19:53:21 -0700
Subject: [PATCH 499/956] provisioner/shell: retry file delete [GH-2286]

---
 provisioner/shell/provisioner.go | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go
index c2ae4d938..2f64759b9 100644
--- a/provisioner/shell/provisioner.go
+++ b/provisioner/shell/provisioner.go
@@ -270,16 +270,25 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
 		return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
 	}

-	// Delete the temporary file we created
-	cmd = &packer.RemoteCmd{
-		Command: fmt.Sprintf("rm -f %s", p.config.RemotePath),
+	// Delete the temporary file we created. We retry this a few times
+	// since if the above rebooted we have to wait until the reboot
+	// completes.
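+	// (p.retryable re-runs the closure until it succeeds or the
+	// provisioner's start retry timeout elapses.)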
+	err = p.retryable(func() error {
+		cmd = &packer.RemoteCmd{
+			Command: fmt.Sprintf("rm -f %s", p.config.RemotePath),
+		}
+		if err := comm.Start(cmd); err != nil {
+			return fmt.Errorf(
+				"Error removing temporary script at %s: %s",
+				p.config.RemotePath, err)
+		}
+		cmd.Wait()
+		return nil
+	})
+	if err != nil {
+		return err
 	}
-	if err := comm.Start(cmd); err != nil {
-		return fmt.Errorf(
-			"Error removing temporary script at %s: %s",
-			p.config.RemotePath, err)
-	}
-	cmd.Wait()
+
 	if cmd.ExitStatus != 0 {
 		return fmt.Errorf(
 			"Error removing temporary script at %s!",

From 9c6d0dfc8b29e5642e1727731eb41a112660e0fc Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sun, 21 Jun 2015 19:56:55 -0700
Subject: [PATCH 500/956] builder/virtualbox: fix type casting

/cc @pearkes - This fixes that crash

---
 builder/virtualbox/common/ssh.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builder/virtualbox/common/ssh.go b/builder/virtualbox/common/ssh.go
index 2584528dd..b04a14ac8 100644
--- a/builder/virtualbox/common/ssh.go
+++ b/builder/virtualbox/common/ssh.go
@@ -12,7 +12,7 @@ func CommHost(state multistep.StateBag) (string, error) {
 }

 func SSHPort(state multistep.StateBag) (int, error) {
-	sshHostPort := state.Get("sshHostPort").(uint)
+	sshHostPort := state.Get("sshHostPort").(int)
 	return int(sshHostPort), nil
 }

From 897888fde3d12d9ea627a423714f4296c49826db Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sun, 21 Jun 2015 19:58:18 -0700
Subject: [PATCH 501/956] common: fix potential panic case

---
 common/download.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/download.go b/common/download.go
index b5798b76c..c9d0bf11d 100644
--- a/common/download.go
+++ b/common/download.go
@@ -118,7 +118,7 @@ func (d *DownloadClient) Get() (string, error) {
 		finalPath = url.Path

 		// Remove forward slash on absolute Windows file URLs before processing
-		if runtime.GOOS == "windows" && finalPath[0] == '/' {
+		if runtime.GOOS == "windows" && len(finalPath) > 0 && finalPath[0] == '/' {
 			finalPath = finalPath[1:len(finalPath)]
 		}
 	} else {

From 93b5ae5b3c460184d16258a50df889281ae76cd2 Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Mon, 22 Jun 2015 10:33:35 -0500
Subject: [PATCH 502/956] fix typo

---
 website/source/docs/builders/amazon-ebs.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index 4fc48c441..b7f16eef9 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -278,7 +278,7 @@ Here is an example using the optional AMI tags. This will add the tags
 }
 ```

--> **Note:** Packer users pre-built AMIs as the source for building images.
+-> **Note:** Packer uses pre-built AMIs as the source for building images.
 These source AMIs may include volumes that are not flagged to be destroyed on
 termination of the instance building the new image.
Packer will attempt to clean up all residual volumes that are not designated by the user to remain after From dff6cf1a83d9be8ea211f150db2c60d875ddafb9 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 22 Jun 2015 10:48:54 -0500 Subject: [PATCH 503/956] code tweak after review --- builder/amazon/ebs/step_cleanup_volumes.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/builder/amazon/ebs/step_cleanup_volumes.go b/builder/amazon/ebs/step_cleanup_volumes.go index c66f9d786..56ebe5527 100644 --- a/builder/amazon/ebs/step_cleanup_volumes.go +++ b/builder/amazon/ebs/step_cleanup_volumes.go @@ -43,16 +43,17 @@ func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) { ui.Say("Cleaning up any extra volumes...") - save := make(map[string]bool) + // We don't actually care about the value here, but we need Set behavior + save := make(map[string]struct{}) for _, b := range s.BlockDevices.AMIMappings { if !b.DeleteOnTermination { - save[b.DeviceName] = true + save[b.DeviceName] = struct{}{} } } for _, b := range s.BlockDevices.LaunchMappings { if !b.DeleteOnTermination { - save[b.DeviceName] = true + save[b.DeviceName] = struct{}{} } } From 54e081d5afd6dcda39cda544634ee1df877740ec Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 09:09:12 -0700 Subject: [PATCH 504/956] builder/virtualbox: only remove output dir after check --- builder/virtualbox/common/step_output_dir.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/builder/virtualbox/common/step_output_dir.go b/builder/virtualbox/common/step_output_dir.go index e01928b7a..1443e5791 100644 --- a/builder/virtualbox/common/step_output_dir.go +++ b/builder/virtualbox/common/step_output_dir.go @@ -17,6 +17,8 @@ import ( type StepOutputDir struct { Force bool Path string + + cleanup bool } func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction { @@ -36,6 +38,9 @@ func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction { os.RemoveAll(s.Path) } + // Enable cleanup + s.cleanup = true + // Create the directory if err := os.MkdirAll(s.Path, 0755); err != nil { state.Put("error", err) @@ -56,6 +61,10 @@ func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction { } func (s *StepOutputDir) Cleanup(state multistep.StateBag) { + if !s.cleanup { + return + } + _, cancelled := state.GetOk(multistep.StateCancelled) _, halted := state.GetOk(multistep.StateHalted) From d6004564870d25cc8d9a5dc564f9b57f1c965a25 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 09:17:09 -0700 Subject: [PATCH 505/956] builder/virtualbox: fix interpolation weirdness --- builder/virtualbox/iso/builder.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index dae107170..f6c19ef99 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -57,8 +57,12 @@ type Config struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + // Set some defaults + b.config.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", b.config.PackerBuildName) + err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", @@ -112,10 +116,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ISOInterface = "ide" } - if b.config.VMName == "" { - 
b.config.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", b.config.PackerBuildName) - } - if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) From 84189f7a28936116ac41f9d8ed7b1fe65e2b5621 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 09:22:42 -0700 Subject: [PATCH 506/956] builder/*: properly save interpolation context --- builder/amazon/chroot/builder.go | 12 ++++++------ builder/amazon/chroot/step_mount_device.go | 2 +- builder/amazon/ebs/builder.go | 14 +++++++------- builder/amazon/instance/builder.go | 14 +++++++------- builder/amazon/instance/step_bundle_volume.go | 2 +- builder/amazon/instance/step_upload_bundle.go | 2 +- builder/digitalocean/config.go | 9 +++++---- builder/docker/config.go | 5 +++-- builder/googlecompute/config.go | 5 +++-- builder/openstack/builder.go | 3 ++- builder/parallels/iso/builder.go | 3 ++- builder/parallels/pvm/config.go | 3 ++- builder/qemu/builder.go | 3 ++- builder/virtualbox/ovf/config.go | 3 ++- builder/vmware/iso/builder.go | 3 ++- builder/vmware/vmx/config.go | 3 ++- 16 files changed, 48 insertions(+), 38 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 18b07b81c..a5ac070d4 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -35,7 +35,7 @@ type Config struct { MountPath string `mapstructure:"mount_path"` SourceAmi string `mapstructure:"source_ami"` - ctx *interpolate.Context + ctx interpolate.Context } type wrappedCommandTemplate struct { @@ -48,10 +48,10 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, - InterpolateContext: b.config.ctx, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "command_wrapper", @@ -96,8 +96,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors var errs *packer.MultiError - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...) 
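+	// (Sharing &b.config.ctx means the context the decoder populated in the
+	// config.Decode call above is the same one these Prepare calls see.)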
for _, mounts := range b.config.ChrootMounts { if len(mounts) != 3 { @@ -132,7 +132,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ec2conn := ec2.New(config) wrappedCommand := func(command string) (string, error) { - ctx := *b.config.ctx + ctx := b.config.ctx ctx.Data = &wrappedCommandTemplate{Command: command} return interpolate.Render(b.config.CommandWrapper, &ctx) } diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index 0e3cdad52..cf10535df 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -33,7 +33,7 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { device := state.Get("device").(string) wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) - ctx := *config.ctx + ctx := config.ctx ctx.Data = &mountPathData{Device: filepath.Base(device)} mountPath, err := interpolate.Render(config.MountPath, &ctx) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index ecf972362..692485701 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -29,7 +29,7 @@ type Config struct { awscommon.BlockDevices `mapstructure:",squash"` awscommon.RunConfig `mapstructure:",squash"` - ctx *interpolate.Context + ctx interpolate.Context } type Builder struct { @@ -38,10 +38,10 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, - InterpolateContext: b.config.ctx, + InterpolateContext: &b.config.ctx, }, raws...) if err != nil { return nil, err @@ -49,10 +49,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors var errs *packer.MultiError - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) 
if errs != nil && len(errs.Errors) > 0 { return nil, errs diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 46862e183..17760d918 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -41,7 +41,7 @@ type Config struct { X509KeyPath string `mapstructure:"x509_key_path"` X509UploadPath string `mapstructure:"x509_upload_path"` - ctx *interpolate.Context + ctx interpolate.Context } type Builder struct { @@ -50,10 +50,10 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} + b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, - InterpolateContext: b.config.ctx, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "bundle_upload_command", @@ -114,10 +114,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors var errs *packer.MultiError - errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...) + errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) if b.config.AccountId == "" { errs = packer.MultiErrorAppend(errs, errors.New("account_id is required")) diff --git a/builder/amazon/instance/step_bundle_volume.go b/builder/amazon/instance/step_bundle_volume.go index fe1092741..bd362f91f 100644 --- a/builder/amazon/instance/step_bundle_volume.go +++ b/builder/amazon/instance/step_bundle_volume.go @@ -42,7 +42,7 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { Prefix: config.BundlePrefix, PrivatePath: config.X509UploadPath, } - config.BundleVolCommand, err = interpolate.Render(config.BundleVolCommand, config.ctx) + config.BundleVolCommand, err = interpolate.Render(config.BundleVolCommand, &config.ctx) if err != nil { err := fmt.Errorf("Error processing bundle volume command: %s", err) state.Put("error", err) diff --git a/builder/amazon/instance/step_upload_bundle.go b/builder/amazon/instance/step_upload_bundle.go index 2f6b328c6..8e3a960b2 100644 --- a/builder/amazon/instance/step_upload_bundle.go +++ b/builder/amazon/instance/step_upload_bundle.go @@ -44,7 +44,7 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction { Region: region, SecretKey: config.SecretKey, } - config.BundleUploadCommand, err = interpolate.Render(config.BundleUploadCommand, config.ctx) + config.BundleUploadCommand, err = interpolate.Render(config.BundleUploadCommand, &config.ctx) if err != nil { err := fmt.Errorf("Error processing bundle upload command: %s", err) state.Put("error", err) diff --git a/builder/digitalocean/config.go b/builder/digitalocean/config.go index 3bddf2f7e..617b88b68 100644 --- a/builder/digitalocean/config.go +++ b/builder/digitalocean/config.go @@ -31,7 +31,7 @@ type Config struct { DropletName string `mapstructure:"droplet_name"` UserData string 
`mapstructure:"user_data"` - ctx *interpolate.Context + ctx interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { @@ -39,8 +39,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { var md mapstructure.Metadata err := config.Decode(c, &config.DecodeOpts{ - Metadata: &md, - Interpolate: true, + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", @@ -85,7 +86,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } var errs *packer.MultiError - if es := c.Comm.Prepare(c.ctx); len(es) > 0 { + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } if c.APIToken == "" { diff --git a/builder/docker/config.go b/builder/docker/config.go index 34fda4309..af5f25fec 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -37,8 +37,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { var md mapstructure.Metadata err := config.Decode(c, &config.DecodeOpts{ - Metadata: &md, - Interpolate: true, + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 762976385..4603f2769 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -41,13 +41,14 @@ type Config struct { account accountFile privateKeyBytes []byte stateTimeout time.Duration - ctx *interpolate.Context + ctx interpolate.Context } func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) err := config.Decode(c, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index 3f2c494fa..d6b528695 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -35,7 +35,8 @@ type Builder struct { func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &b.config.ctx, }, raws...) 
if err != nil { return nil, err diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 46fa73687..fdab718bc 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -57,7 +57,8 @@ type Config struct { func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index 8f6d9a915..a4b5f10b5 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -34,7 +34,8 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) err := config.Decode(c, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 9df908989..46b9cabd3 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -127,7 +127,8 @@ type Config struct { func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go index 837c3f37a..e881150c0 100644 --- a/builder/virtualbox/ovf/config.go +++ b/builder/virtualbox/ovf/config.go @@ -42,7 +42,8 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) err := config.Decode(c, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index fa8deb983..f2489c50f 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -68,7 +68,8 @@ type Config struct { func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", diff --git a/builder/vmware/vmx/config.go b/builder/vmware/vmx/config.go index 9010d4b58..5f06127fe 100644 --- a/builder/vmware/vmx/config.go +++ b/builder/vmware/vmx/config.go @@ -35,7 +35,8 @@ type Config struct { func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) err := config.Decode(c, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "boot_command", From 93bb0d86afa2cf598aac07a30fe51900d3f77029 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 09:25:15 -0700 Subject: [PATCH 507/956] builder/virtualbox: fix default names --- builder/virtualbox/iso/builder.go | 8 +++++--- builder/virtualbox/ovf/config.go | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index f6c19ef99..0758e9bdd 100644 --- a/builder/virtualbox/iso/builder.go +++ 
b/builder/virtualbox/iso/builder.go @@ -57,9 +57,6 @@ type Config struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { - // Set some defaults - b.config.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", b.config.PackerBuildName) - err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, @@ -116,6 +113,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ISOInterface = "ide" } + if b.config.VMName == "" { + b.config.VMName = fmt.Sprintf( + "packer-%s-%d", b.config.PackerBuildName, interpolate.InitTime.Unix()) + } + if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) diff --git a/builder/virtualbox/ovf/config.go b/builder/virtualbox/ovf/config.go index e881150c0..c1afd8c25 100644 --- a/builder/virtualbox/ovf/config.go +++ b/builder/virtualbox/ovf/config.go @@ -66,8 +66,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { if c.GuestAdditionsPath == "" { c.GuestAdditionsPath = "VBoxGuestAdditions.iso" } + if c.VMName == "" { - c.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", c.PackerBuildName) + c.VMName = fmt.Sprintf( + "packer-%s-%d", c.PackerBuildName, interpolate.InitTime.Unix()) } // Prepare the errors From 6cb51f21de746404e7ba1be3a5bfe00181bf1dd1 Mon Sep 17 00:00:00 2001 From: Salvador Girones Date: Mon, 22 Jun 2015 10:13:49 -0700 Subject: [PATCH 508/956] Make ResourcePool optional --- post-processor/vsphere/post-processor.go | 20 +++++++++++-------- .../post-processors/vsphere.html.markdown | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index f9a6c37c3..0b25c7b2c 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -72,7 +72,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { "diskmode": &p.config.DiskMode, "host": &p.config.Host, "password": &p.config.Password, - "resource_pool": &p.config.ResourcePool, "username": &p.config.Username, "vm_name": &p.config.VMName, } @@ -107,6 +106,17 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("VMX file not found") } + ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s", + url.QueryEscape(p.config.Username), + url.QueryEscape(p.config.Password), + p.config.Host, + p.config.Datacenter, + p.config.Cluster) + + if p.config.ResourcePool != "" { + ovftool_uri += "/Resources/" + p.config.ResourcePool + } + args := []string{ fmt.Sprintf("--noSSLVerify=%t", p.config.Insecure), "--acceptAllEulas", @@ -116,13 +126,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac fmt.Sprintf("--network=%s", p.config.VMNetwork), fmt.Sprintf("--vmFolder=%s", p.config.VMFolder), fmt.Sprintf("%s", vmx), - fmt.Sprintf("vi://%s:%s@%s/%s/host/%s/Resources/%s/", - url.QueryEscape(p.config.Username), - url.QueryEscape(p.config.Password), - p.config.Host, - p.config.Datacenter, - p.config.Cluster, - p.config.ResourcePool), + fmt.Sprintf("%s", ovftool_uri), } ui.Message(fmt.Sprintf("Uploading %s to vSphere", vmx)) diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown index a6f790bc1..ca3f3f54a 100644 --- 
a/website/source/docs/post-processors/vsphere.html.markdown +++ b/website/source/docs/post-processors/vsphere.html.markdown @@ -35,7 +35,7 @@ Required: endpoint. * `resource_pool` (string) - The resource pool to upload the VM to. - This is _not required_ if `datastore` is specified. + This is _not required_. * `username` (string) - The username to use to authenticate to the vSphere endpoint. From 42000dda6687cfaeb36633a9326d5ebc9ff8b14d Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 22 Jun 2015 12:57:36 -0500 Subject: [PATCH 509/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25f52920d..e171a2f7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ IMPROVEMENTS: * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] + * builder/amazon: Clean up orphaned volumes from Source AMIs [GH-1783] * builder/amazon: Support custom keypairs [GH-1837] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] From bf456f35f9867c86d864df184198291a799349e0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:02:38 -0700 Subject: [PATCH 510/956] common: download client tests /cc @cbednarski --- common/download_test.go | 124 +++++++++++++++++++++++++- common/test-fixtures/root/another.txt | 1 + common/test-fixtures/root/basic.txt | 1 + 3 files changed, 123 insertions(+), 3 deletions(-) create mode 100644 common/test-fixtures/root/another.txt create mode 100644 common/test-fixtures/root/basic.txt diff --git a/common/download_test.go b/common/download_test.go index 57b4ba7bc..effbf0059 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -10,7 +10,7 @@ import ( "testing" ) -func TestDownloadClient_VerifyChecksum(t *testing.T) { +func TestDownloadClientVerifyChecksum(t *testing.T) { tf, err := ioutil.TempFile("", "packer") if err != nil { t.Fatalf("tempfile error: %s", err) @@ -43,7 +43,125 @@ func TestDownloadClient_VerifyChecksum(t *testing.T) { } } -func TestDownloadClientUsesDefaultUserAgent(t *testing.T) { +func TestDownloadClient_basic(t *testing.T) { + tf, _ := ioutil.TempFile("", "packer") + tf.Close() + os.Remove(tf.Name()) + + ts := httptest.NewServer(http.FileServer(http.Dir("./test-fixtures/root"))) + defer ts.Close() + + client := NewDownloadClient(&DownloadConfig{ + Url: ts.URL + "/basic.txt", + TargetPath: tf.Name(), + }) + path, err := client.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + if string(raw) != "hello\n" { + t.Fatalf("bad: %s", string(raw)) + } +} + +func TestDownloadClient_checksumBad(t *testing.T) { + checksum, err := hex.DecodeString("b2946ac92492d2347c6235b4d2611184") + if err != nil { + t.Fatalf("err: %s", err) + } + + tf, _ := ioutil.TempFile("", "packer") + tf.Close() + os.Remove(tf.Name()) + + ts := httptest.NewServer(http.FileServer(http.Dir("./test-fixtures/root"))) + defer ts.Close() + + client := NewDownloadClient(&DownloadConfig{ + Url: ts.URL + "/basic.txt", + TargetPath: tf.Name(), + Hash: HashForType("md5"), + Checksum: checksum, + }) + if _, err := client.Get(); err == nil { + t.Fatal("should error") + } +} + +func TestDownloadClient_checksumGood(t *testing.T) { + checksum, err := hex.DecodeString("b1946ac92492d2347c6235b4d2611184") + if err != nil { + t.Fatalf("err: %s", err) + 
} + + tf, _ := ioutil.TempFile("", "packer") + tf.Close() + os.Remove(tf.Name()) + + ts := httptest.NewServer(http.FileServer(http.Dir("./test-fixtures/root"))) + defer ts.Close() + + client := NewDownloadClient(&DownloadConfig{ + Url: ts.URL + "/basic.txt", + TargetPath: tf.Name(), + Hash: HashForType("md5"), + Checksum: checksum, + }) + path, err := client.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + if string(raw) != "hello\n" { + t.Fatalf("bad: %s", string(raw)) + } +} + +func TestDownloadClient_checksumNoDownload(t *testing.T) { + checksum, err := hex.DecodeString("3740570a423feec44c2a759225a9fcf9") + if err != nil { + t.Fatalf("err: %s", err) + } + + ts := httptest.NewServer(http.FileServer(http.Dir("./test-fixtures/root"))) + defer ts.Close() + + client := NewDownloadClient(&DownloadConfig{ + Url: ts.URL + "/basic.txt", + TargetPath: "./test-fixtures/root/another.txt", + Hash: HashForType("md5"), + Checksum: checksum, + }) + path, err := client.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + // If this says "hello" it means we downloaded it. We faked out + // the downloader above by giving it the checksum for "another", but + // requested the download of "hello" + if string(raw) != "another\n" { + t.Fatalf("bad: %s", string(raw)) + } +} + +func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { tf, err := ioutil.TempFile("", "packer") if err != nil { t.Fatalf("tempfile error: %s", err) @@ -97,7 +215,7 @@ func TestDownloadClientUsesDefaultUserAgent(t *testing.T) { } } -func TestDownloadClientSetsUserAgent(t *testing.T) { +func TestDownloadClient_setsUserAgent(t *testing.T) { tf, err := ioutil.TempFile("", "packer") if err != nil { t.Fatalf("tempfile error: %s", err) diff --git a/common/test-fixtures/root/another.txt b/common/test-fixtures/root/another.txt new file mode 100644 index 000000000..9b24da92a --- /dev/null +++ b/common/test-fixtures/root/another.txt @@ -0,0 +1 @@ +another diff --git a/common/test-fixtures/root/basic.txt b/common/test-fixtures/root/basic.txt new file mode 100644 index 000000000..ce0136250 --- /dev/null +++ b/common/test-fixtures/root/basic.txt @@ -0,0 +1 @@ +hello From 2f530534d2f5c84f81922c040f14a0f6b5aa2b33 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:14:35 -0700 Subject: [PATCH 511/956] common/download: resume test --- common/download.go | 53 ++++++++++++++++------------------------- common/download_test.go | 35 +++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 33 deletions(-) diff --git a/common/download.go b/common/download.go index 81102069c..6cd2a7a15 100644 --- a/common/download.go +++ b/common/download.go @@ -99,8 +99,6 @@ func (d *DownloadClient) Cancel() { } func (d *DownloadClient) Get() (string, error) { - var f *os.File - // If we already have the file and it matches, then just return the target path. if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify { log.Println("Initial checksum matched, no download needed.") @@ -115,6 +113,7 @@ func (d *DownloadClient) Get() (string, error) { log.Printf("Parsed URL: %#v", url) // Files when we don't copy the file are special cased. 
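+	// (In that branch the local path is returned as-is below, so no
+	// download handle needs to be opened for it.)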
+ var f *os.File var finalPath string if url.Scheme == "file" && !d.config.CopyFile { finalPath = url.Path @@ -199,6 +198,15 @@ func (*HTTPDownloader) Cancel() { func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { log.Printf("Starting download: %s", src.String()) + + // Seek to the beginning by default + if _, err := dst.Seek(0, 0); err != nil { + return err + } + + // Make the request. We first make a HEAD request so we can check + // if the server supports range queries. If the server/URL doesn't + // support HEAD requests, we just fall back to GET. req, err := http.NewRequest("HEAD", src.String(), nil) if err != nil { return err @@ -215,41 +223,21 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } resp, err := httpClient.Do(req) - if err != nil || resp.StatusCode != 200 { - req.Method = "GET" - resp, err = httpClient.Do(req) - if err != nil { - return err - } - } - - if resp.StatusCode != 200 { - log.Printf( - "Non-200 status code: %d. Getting error body.", resp.StatusCode) - if req.Method != "GET" { - req.Method = "GET" - resp, err = httpClient.Do(req) - if err != nil { - return err + if err == nil && (resp.StatusCode >= 200 && resp.StatusCode < 300) { + // If the HEAD request succeeded, then attempt to set the range + // query if we can. + if resp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := dst.Stat(); err == nil { + if _, err = dst.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + d.progress = uint(fi.Size()) + } } } - errorBody := new(bytes.Buffer) - io.Copy(errorBody, resp.Body) - return fmt.Errorf("HTTP error '%d'! Remote side responded:\n%s", - resp.StatusCode, errorBody.String()) } + // Set the request to GET now, and redo the query to download req.Method = "GET" - d.progress = 0 - - if resp.Header.Get("Accept-Ranges") == "bytes" { - if fi, err := dst.Stat(); err == nil { - if _, err = dst.Seek(0, os.SEEK_END); err == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) - d.progress = uint(fi.Size()) - } - } - } resp, err = httpClient.Do(req) if err != nil { @@ -257,7 +245,6 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } d.total = uint(resp.ContentLength) - var buffer [4096]byte for { n, err := resp.Body.Read(buffer[:]) diff --git a/common/download_test.go b/common/download_test.go index effbf0059..dc5bd29ed 100644 --- a/common/download_test.go +++ b/common/download_test.go @@ -161,6 +161,41 @@ func TestDownloadClient_checksumNoDownload(t *testing.T) { } } +func TestDownloadClient_resume(t *testing.T) { + tf, _ := ioutil.TempFile("", "packer") + tf.Write([]byte("w")) + tf.Close() + + ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + rw.Header().Set("Accept-Ranges", "bytes") + rw.WriteHeader(204) + return + } + + http.ServeFile(rw, r, "./test-fixtures/root/basic.txt") + })) + defer ts.Close() + + client := NewDownloadClient(&DownloadConfig{ + Url: ts.URL, + TargetPath: tf.Name(), + }) + path, err := client.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + if string(raw) != "wello\n" { + t.Fatalf("bad: %s", string(raw)) + } +} + func TestDownloadClient_usesDefaultUserAgent(t *testing.T) { tf, err := ioutil.TempFile("", "packer") if err != nil { From a5ac80661a73b93f71a3d54ac58572dc1c430bcd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:15:03 
-0700 Subject: [PATCH 512/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e171a2f7b..1ee7b862d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ FEATURES: IMPROVEMENTS: * core: Interrupt handling for SIGTERM signal as well. [GH-1858] + * core: HTTP downloads support resuming [GH-2106] * builder/*: Add `ssh_handshake_attempts` to configure the number of handshake attempts done before failure [GH-2237] * builder/amazon: Add `force_deregister` option for automatic AMI From 944b4bf46cf3266abcbfdf6da4b33097c232e361 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:17:29 -0700 Subject: [PATCH 513/956] common: delete file if checksum fails --- builder/amazon/common/block_device_test.go | 2 -- common/download.go | 9 +++++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index b99a22747..063c480f1 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -46,7 +46,6 @@ func TestBlockDevice(t *testing.T) { DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ - Encrypted: aws.Boolean(false), VolumeType: aws.String(""), VolumeSize: aws.Long(8), DeleteOnTermination: aws.Boolean(false), @@ -67,7 +66,6 @@ func TestBlockDevice(t *testing.T) { DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String("ephemeral0"), EBS: &ec2.EBSBlockDevice{ - Encrypted: aws.Boolean(false), VolumeType: aws.String("io1"), VolumeSize: aws.Long(8), DeleteOnTermination: aws.Boolean(true), diff --git a/common/download.go b/common/download.go index 6cd2a7a15..6b346f99d 100644 --- a/common/download.go +++ b/common/download.go @@ -136,10 +136,10 @@ func (d *DownloadClient) Get() (string, error) { if err != nil { return "", err } - defer f.Close() log.Printf("Downloading: %s", url.String()) err = d.downloader.Download(f, url) + f.Close() if err != nil { return "", err } @@ -149,7 +149,12 @@ func (d *DownloadClient) Get() (string, error) { var verify bool verify, err = d.VerifyChecksum(finalPath) if err == nil && !verify { - err = fmt.Errorf("checksums didn't match expected: %s", hex.EncodeToString(d.config.Checksum)) + // Delete the file + os.Remove(finalPath) + + err = fmt.Errorf( + "checksums didn't match expected: %s", + hex.EncodeToString(d.config.Checksum)) } } From 1a68a36fccd50966dd7e56454e5ee7212859db15 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:19:13 -0700 Subject: [PATCH 514/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ee7b862d..39ba7962e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ FEATURES: builder. This is useful for provisioners. [GH-2232] * **New config function: `template_dir`**: The directory to the template being built. This should be used for template-relative paths. [GH-54] + * **New provisioner: shell-local**: Runs a local shell script. [GH-770] * **New provisioner: powershell**: Provision Windows machines with PowerShell scripts. 
[GH-2243] * **New provisioner: windows-shell**: Provision Windows machines with From 5241d8c6d6239f770474251ef85095472efb2471 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:24:27 -0700 Subject: [PATCH 515/956] post-processor/*: fix interpolation context --- post-processor/atlas/post-processor.go | 3 ++- post-processor/compress/post-processor.go | 9 +++++---- post-processor/docker-import/post-processor.go | 3 ++- post-processor/docker-push/post-processor.go | 3 ++- post-processor/docker-save/post-processor.go | 3 ++- post-processor/docker-tag/post-processor.go | 3 ++- post-processor/vagrant-cloud/post-processor.go | 3 ++- post-processor/vagrant/post-processor.go | 5 +++-- post-processor/vsphere/post-processor.go | 3 ++- 9 files changed, 22 insertions(+), 13 deletions(-) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index 2d3e4e39f..edfc1d7c4 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -53,7 +53,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 1a4b97595..8b70bc456 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -31,7 +31,7 @@ type Config struct { Archive string Algorithm string - ctx *interpolate.Context + ctx interpolate.Context } type PostProcessor struct { @@ -52,7 +52,8 @@ var ( func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, @@ -69,7 +70,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" } - if err = interpolate.Validate(p.config.OutputPath, p.config.ctx); err != nil { + if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing target template: %s", err)) } @@ -94,7 +95,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { errs, fmt.Errorf("%s must be set", key)) } - *ptr, err = interpolate.Render(p.config.OutputPath, p.config.ctx) + *ptr, err = interpolate.Render(p.config.OutputPath, &p.config.ctx) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", key, err)) diff --git a/post-processor/docker-import/post-processor.go b/post-processor/docker-import/post-processor.go index cb0e4ec7a..f9a9a18a9 100644 --- a/post-processor/docker-import/post-processor.go +++ b/post-processor/docker-import/post-processor.go @@ -27,7 +27,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/post-processor/docker-push/post-processor.go b/post-processor/docker-push/post-processor.go index 7fd2e4a32..d56f91382 100644 --- a/post-processor/docker-push/post-processor.go +++ 
b/post-processor/docker-push/post-processor.go @@ -33,7 +33,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/post-processor/docker-save/post-processor.go b/post-processor/docker-save/post-processor.go index ab6170802..2e3b36b4e 100644 --- a/post-processor/docker-save/post-processor.go +++ b/post-processor/docker-save/post-processor.go @@ -31,7 +31,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/post-processor/docker-tag/post-processor.go b/post-processor/docker-tag/post-processor.go index a531f2820..2f52e965d 100644 --- a/post-processor/docker-tag/post-processor.go +++ b/post-processor/docker-tag/post-processor.go @@ -31,7 +31,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/post-processor/vagrant-cloud/post-processor.go b/post-processor/vagrant-cloud/post-processor.go index ace2a5c71..53d75dac1 100644 --- a/post-processor/vagrant-cloud/post-processor.go +++ b/post-processor/vagrant-cloud/post-processor.go @@ -47,7 +47,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "box_download_url", diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index 63200272b..bc39c27fa 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -170,8 +170,9 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac func (p *PostProcessor) configureSingle(c *Config, raws ...interface{}) error { var md mapstructure.Metadata err := config.Decode(c, &config.DecodeOpts{ - Metadata: &md, - Interpolate: true, + Metadata: &md, + Interpolate: true, + InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "output", diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index 0b25c7b2c..f13395765 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -43,7 +43,8 @@ type PostProcessor struct { func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, From 7c8e86c9c5fe7303b1cec16f248b0d6617598c55 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:26:54 -0700 Subject: [PATCH 516/956] provisioner/*: fix interpolation context --- provisioner/ansible-local/provisioner.go | 3 ++- provisioner/chef-client/provisioner.go | 3 ++- 
provisioner/chef-solo/provisioner.go | 3 ++- provisioner/file/provisioner.go | 3 ++- provisioner/powershell/provisioner.go | 3 ++- provisioner/puppet-masterless/provisioner.go | 3 ++- provisioner/puppet-server/provisioner.go | 4 ++-- provisioner/salt-masterless/provisioner.go | 3 ++- provisioner/shell-local/provisioner.go | 3 ++- provisioner/shell/provisioner.go | 3 ++- provisioner/windows-restart/provisioner.go | 3 ++- provisioner/windows-shell/provisioner.go | 3 ++- 12 files changed, 24 insertions(+), 13 deletions(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index 1d18ffc49..10e8b1390 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -57,7 +57,8 @@ type Provisioner struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 2d42d361d..4d9430347 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -69,7 +69,8 @@ type InstallChefTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/chef-solo/provisioner.go b/provisioner/chef-solo/provisioner.go index 686dc250e..61f734805 100644 --- a/provisioner/chef-solo/provisioner.go +++ b/provisioner/chef-solo/provisioner.go @@ -73,7 +73,8 @@ type InstallChefTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index 9bc2a646c..f88641095 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -32,7 +32,8 @@ type Provisioner struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index b31781d88..31ba2b34a 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -99,7 +99,8 @@ type ExecuteCommandTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 8534aab32..177cae23c 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -66,7 +66,8 @@ type ExecuteTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, 
&config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/puppet-server/provisioner.go b/provisioner/puppet-server/provisioner.go index 24981e759..9f1c8bead 100644 --- a/provisioner/puppet-server/provisioner.go +++ b/provisioner/puppet-server/provisioner.go @@ -44,7 +44,6 @@ type Config struct { // If true, packer will ignore all exit-codes from a puppet run IgnoreExitCodes bool `mapstructure:"ignore_exit_codes"` - } type Provisioner struct { @@ -63,7 +62,8 @@ type ExecuteTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index 9c9ef8b4c..e8f9ed1cc 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -44,7 +44,8 @@ type Provisioner struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index 499be7f1d..bbe423b72 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -29,7 +29,8 @@ type Provisioner struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 2f64759b9..3c32b3fc2 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -73,7 +73,8 @@ type ExecuteCommandTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 7c1c5ada9..4b6af609e 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -43,7 +43,8 @@ type Provisioner struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "execute_command", diff --git a/provisioner/windows-shell/provisioner.go b/provisioner/windows-shell/provisioner.go index 50c0aaeb1..6e65c8c20 100644 --- a/provisioner/windows-shell/provisioner.go +++ b/provisioner/windows-shell/provisioner.go @@ -76,7 +76,8 @@ type ExecuteCommandTemplate struct { func (p *Provisioner) Prepare(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, + Interpolate: true, + InterpolateContext: &p.config.ctx, InterpolateFilter: 
&interpolate.RenderFilter{ Exclude: []string{ "execute_command", From 5db4d7c01f303ef0e83625f3921d326dba35670d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:37:52 -0700 Subject: [PATCH 517/956] fmt --- builder/amazon/chroot/step_mount_extra.go | 2 +- post-processor/vsphere/post-processor.go | 24 +++++++++---------- .../salt-masterless/provisioner_test.go | 18 +++++++------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/builder/amazon/chroot/step_mount_extra.go b/builder/amazon/chroot/step_mount_extra.go index aa63b4b61..b3ef764b5 100644 --- a/builder/amazon/chroot/step_mount_extra.go +++ b/builder/amazon/chroot/step_mount_extra.go @@ -115,7 +115,7 @@ func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error { } } } - + unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path)) if err != nil { return fmt.Errorf("Error creating unmount command: %s", err) diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index f13395765..39cd8c15b 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -68,13 +68,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { // First define all our templatable parameters that are _required_ templates := map[string]*string{ - "cluster": &p.config.Cluster, - "datacenter": &p.config.Datacenter, - "diskmode": &p.config.DiskMode, - "host": &p.config.Host, - "password": &p.config.Password, - "username": &p.config.Username, - "vm_name": &p.config.VMName, + "cluster": &p.config.Cluster, + "datacenter": &p.config.Datacenter, + "diskmode": &p.config.DiskMode, + "host": &p.config.Host, + "password": &p.config.Password, + "username": &p.config.Username, + "vm_name": &p.config.VMName, } for key, ptr := range templates { if *ptr == "" { @@ -108,11 +108,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s", - url.QueryEscape(p.config.Username), - url.QueryEscape(p.config.Password), - p.config.Host, - p.config.Datacenter, - p.config.Cluster) + url.QueryEscape(p.config.Username), + url.QueryEscape(p.config.Password), + p.config.Host, + p.config.Datacenter, + p.config.Cluster) if p.config.ResourcePool != "" { ovftool_uri += "/Resources/" + p.config.ResourcePool diff --git a/provisioner/salt-masterless/provisioner_test.go b/provisioner/salt-masterless/provisioner_test.go index c15053b21..9c8320769 100644 --- a/provisioner/salt-masterless/provisioner_test.go +++ b/provisioner/salt-masterless/provisioner_test.go @@ -113,18 +113,18 @@ func TestProvisionerSudo(t *testing.T) { t.Fatalf("err: %s", err) } - withSudo := p.sudo("echo hello") - if withSudo != "sudo echo hello" { - t.Fatalf("sudo command not generated correctly") - } + withSudo := p.sudo("echo hello") + if withSudo != "sudo echo hello" { + t.Fatalf("sudo command not generated correctly") + } - config["disable_sudo"] = true + config["disable_sudo"] = true err = p.Prepare(config) if err != nil { t.Fatalf("err: %s", err) } - withoutSudo := p.sudo("echo hello") - if withoutSudo != "echo hello" { - t.Fatalf("sudo-less command not generated correctly") - } + withoutSudo := p.sudo("echo hello") + if withoutSudo != "echo hello" { + t.Fatalf("sudo-less command not generated correctly") + } } From 1aa1399e85af3bda4e64cc6d1393575d8a9ac15e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 12:46:13 -0700 Subject: [PATCH 518/956] builder/parallels: fix 
compilation issue --- builder/parallels/common/prlctl_post_config.go | 17 +++-------------- builder/parallels/iso/builder.go | 4 ++-- builder/parallels/pvm/builder.go | 2 +- builder/parallels/pvm/config.go | 2 +- 4 files changed, 7 insertions(+), 18 deletions(-) diff --git a/builder/parallels/common/prlctl_post_config.go b/builder/parallels/common/prlctl_post_config.go index 23c2d5520..2490bc996 100644 --- a/builder/parallels/common/prlctl_post_config.go +++ b/builder/parallels/common/prlctl_post_config.go @@ -1,28 +1,17 @@ package common import ( - "fmt" - "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" ) type PrlctlPostConfig struct { PrlctlPost [][]string `mapstructure:"prlctl_post"` } -func (c *PrlctlPostConfig) Prepare(t *packer.ConfigTemplate) []error { +func (c *PrlctlPostConfig) Prepare(ctx *interpolate.Context) []error { if c.PrlctlPost == nil { c.PrlctlPost = make([][]string, 0) } - errs := make([]error, 0) - for i, args := range c.PrlctlPost { - for j, arg := range args { - if err := t.Validate(arg); err != nil { - errs = append(errs, - fmt.Errorf("Error processing prlctl_post[%d][%d]: %s", i, j, err)) - } - } - } - - return errs + return nil } diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 0c3bc0384..4a75b0b47 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -79,7 +79,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(&b.config.ctx)...) - errs = packer.MultiErrorAppend(errs, b.config.PrlctlPostConfig.Prepare(&b.config.tpl)...) + errs = packer.MultiErrorAppend(errs, b.config.PrlctlPostConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) @@ -270,7 +270,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &parallelscommon.StepPrlctl{ Commands: b.config.PrlctlPost, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, } diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index 87a92989a..471b59bef 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -103,7 +103,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &parallelscommon.StepPrlctl{ Commands: b.config.PrlctlPost, - Tpl: b.config.tpl, + Ctx: b.config.ctx, }, } diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index 767ed913c..f03584bf2 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -58,7 +58,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs = packer.MultiErrorAppend(errs, c.FloppyConfig.Prepare(&c.ctx)...) errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(&c.ctx, &c.PackerConfig)...) errs = packer.MultiErrorAppend(errs, c.PrlctlConfig.Prepare(&c.ctx)...) - errs = packer.MultiErrorAppend(errs, c.PrlctlPostConfig.Prepare(&c.tpl)...) + errs = packer.MultiErrorAppend(errs, c.PrlctlPostConfig.Prepare(&c.ctx)...) errs = packer.MultiErrorAppend(errs, c.PrlctlVersionConfig.Prepare(&c.ctx)...)
errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(&c.ctx)...) errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(&c.ctx)...) From 9bc0dfa3896e70a201e529f015dc735632993612 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 14:50:25 -0700 Subject: [PATCH 519/956] builder/vmware: disable VNC before packaging --- builder/vmware/common/step_clean_vmx.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go index df7b42763..bf76f5863 100644 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -55,6 +55,9 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { vmxData[ide+"filename"] = "auto detect" } + ui.Message("Disabling VNC server...") + vmxData["remotedisplay.vnc.enabled"] = "FALSE" + // Rewrite the VMX if err := WriteVMX(vmxPath, vmxData); err != nil { state.Put("error", fmt.Errorf("Error writing VMX: %s", err)) From 117579808f420f8893154f44fe7189491208d792 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 14:58:27 -0700 Subject: [PATCH 520/956] common: add the current progress to the total size --- common/download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/download.go b/common/download.go index 6b346f99d..c895ee1e7 100644 --- a/common/download.go +++ b/common/download.go @@ -249,7 +249,7 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } - d.total = uint(resp.ContentLength) + d.total = d.progress + uint(resp.ContentLength) var buffer [4096]byte for { n, err := resp.Body.Read(buffer[:]) From 0416939c08c5552fad71a70951d8012ddfedd9fd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 14:59:38 -0700 Subject: [PATCH 521/956] common: always reset progress to 0 for downloads --- common/download.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/common/download.go b/common/download.go index c895ee1e7..16c0724c3 100644 --- a/common/download.go +++ b/common/download.go @@ -209,6 +209,9 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { return err } + // Reset our progress + d.progress = 0 + // Make the request. We first make a HEAD request so we can check // if the server supports range queries. If the server/URL doesn't // support HEAD requests, we just fall back to GET. 
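The two download fixes above are really one change in two parts: Packer's HTTPDownloader resumes interrupted transfers with an HTTP Range request, so the server's Content-Length only describes the bytes that are still missing. The running total therefore has to be the progress already made plus the remaining length, and the progress counter has to be reset to zero before each fresh attempt. The following is a minimal, self-contained sketch of that accounting, not Packer's actual downloader; the file name and URL are placeholders.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Open (or create) the destination and see how many bytes we already have.
	dst, err := os.OpenFile("artifact.bin", os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	have, err := dst.Seek(0, io.SeekEnd)
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("GET", "http://example.com/artifact.bin", nil)
	if err != nil {
		panic(err)
	}
	if have > 0 {
		// Resume: request only the bytes we are missing.
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", have))
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// For a ranged response, Content-Length covers only the remainder, so
	// the true total is the progress so far plus Content-Length; this is
	// the same accounting the patch introduces. A production downloader
	// would also check resp.StatusCode == http.StatusPartialContent before
	// assuming the server honored the Range header.
	progress := have
	total := have + resp.ContentLength

	n, err := io.Copy(dst, resp.Body)
	progress += n
	fmt.Printf("downloaded %d of %d bytes (copy error: %v)\n", progress, total, err)
}
```

Without the second fix, a downloader instance reused across retries would keep its old progress and over-count; resetting to zero at the start of each download makes the arithmetic hold for both fresh and resumed transfers.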
From b77042dc8570aa4fef99617163ddf05f7c38b0f9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 15:01:22 -0700 Subject: [PATCH 522/956] provisioner/shell-local: fix vet --- provisioner/shell-local/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index bbe423b72..f3c451951 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -96,7 +96,7 @@ func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error { } if cmd.ExitStatus != 0 { return fmt.Errorf( - "Erroneous exit code %s while executing command: %s\n\n"+ + "Erroneous exit code %d while executing command: %s\n\n"+ "Please see output above for more information.", cmd.ExitStatus, p.config.Command) From 5852bd7cd77d85128f326a19b98c09ea0ba40504 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 22 Jun 2015 17:56:35 -0700 Subject: [PATCH 523/956] builder/qemu: default acceleration to tcg on Windows [GH-2284] --- builder/qemu/builder.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 46b9cabd3..4ac22b59b 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "strings" "time" @@ -153,7 +154,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.Accelerator == "" { - b.config.Accelerator = "kvm" + if runtime.GOOS == "windows" { + b.config.Accelerator = "tcg" + } else { + b.config.Accelerator = "kvm" + } } if b.config.HTTPPortMin == 0 { From f374edc2b8ffdd001754117a9c44654dc0af4f6f Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 22 Jun 2015 17:08:41 -0500 Subject: [PATCH 524/956] builder/amazon-chroot: Fixes for amazon-chroot builder These are needed for chroot builder to work --- builder/amazon/chroot/step_create_volume.go | 2 +- builder/amazon/chroot/step_instance_info.go | 2 +- builder/amazon/chroot/step_register_ami.go | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 2d7205e8d..81486f5e2 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -29,7 +29,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { log.Printf("Searching for root device of the image (%s)", *image.RootDeviceName) var rootDevice *ec2.BlockDeviceMapping for _, device := range image.BlockDeviceMappings { - if device.DeviceName == image.RootDeviceName { + if *device.DeviceName == *image.RootDeviceName { rootDevice = device break } diff --git a/builder/amazon/chroot/step_instance_info.go b/builder/amazon/chroot/step_instance_info.go index ee8dbb3e6..b77c9e8a1 100644 --- a/builder/amazon/chroot/step_instance_info.go +++ b/builder/amazon/chroot/step_instance_info.go @@ -49,7 +49,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - instance := &instancesResp.Reservations[0].Instances[0] + instance := instancesResp.Reservations[0].Instances[0] state.Put("instance", instance) return multistep.ActionContinue diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index ee2cf48e4..1d61068cf 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ 
b/builder/amazon/chroot/step_register_ami.go @@ -24,14 +24,20 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { blockDevices := make([]*ec2.BlockDeviceMapping, len(image.BlockDeviceMappings)) for i, device := range image.BlockDeviceMappings { newDevice := device - if newDevice.DeviceName == image.RootDeviceName { + if *newDevice.DeviceName == *image.RootDeviceName { if newDevice.EBS != nil { - newDevice.EBS.SnapshotID = &snapshotId + newDevice.EBS.SnapshotID = aws.String(snapshotId) } else { - newDevice.EBS = &ec2.EBSBlockDevice{SnapshotID: &snapshotId} + newDevice.EBS = &ec2.EBSBlockDevice{SnapshotID: aws.String(snapshotId)} } } + // assume working from a snapshot, so we unset the Encrypted field if set, + // otherwise AWS API will return InvalidParameter + if newDevice.EBS.Encrypted != nil { + newDevice.EBS.Encrypted = nil + } + blockDevices[i] = newDevice } @@ -82,7 +88,10 @@ func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []*ec2.Blo Architecture: image.Architecture, RootDeviceName: image.RootDeviceName, BlockDeviceMappings: blockDevices, - VirtualizationType: &config.AMIVirtType, + } + + if config.AMIVirtType != "" { + registerOpts.VirtualizationType = aws.String(config.AMIVirtType) } if config.AMIVirtType != "hvm" { From 9f8b0041f0eee4c1c3d638a20ed235f8a7fbd4b5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 22 Jun 2015 22:08:31 -0500 Subject: [PATCH 525/956] builder/amazon-chroot: use source image virtualization type, unless specified --- builder/amazon/chroot/step_register_ami.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 1d61068cf..0074cfbbe 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -88,6 +88,7 @@ func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []*ec2.Blo Architecture: image.Architecture, RootDeviceName: image.RootDeviceName, BlockDeviceMappings: blockDevices, + VirtualizationType: image.VirtualizationType, } if config.AMIVirtType != "" { From 036c9ddf7c88e9b7acc9daf6e39109483d83ec85 Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 23 Jun 2015 12:27:27 +0300 Subject: [PATCH 526/956] fix file download provisioner When a file is used with the download direction, we don't need to check the source on the builder, because it is on the remote machine.
Signed-off-by: Vasiliy Tolstov --- provisioner/file/provisioner.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index f88641095..19cebe263 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -47,10 +47,6 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } var errs *packer.MultiError - if _, err := os.Stat(p.config.Source); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) - } if p.config.Direction != "download" && p.config.Direction != "upload" { errs = packer.MultiErrorAppend(errs, From 2b36bcc502471bda23fd69beb2cf4c3a6f759f7e Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 23 Jun 2015 14:28:03 +0300 Subject: [PATCH 527/956] allow importing qemu-built artifacts Signed-off-by: Vasiliy Tolstov --- post-processor/docker-import/post-processor.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/post-processor/docker-import/post-processor.go b/post-processor/docker-import/post-processor.go index f9a9a18a9..8074d1a02 100644 --- a/post-processor/docker-import/post-processor.go +++ b/post-processor/docker-import/post-processor.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/mitchellh/packer/builder/docker" + "github.com/mitchellh/packer/builder/qemu" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" @@ -42,9 +43,12 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - if artifact.BuilderId() != docker.BuilderId { + switch artifact.BuilderId() { + case docker.BuilderId, qemu.BuilderId: + break + default: err := fmt.Errorf( - "Unknown artifact type: %s\nCan only import from Docker builder artifacts.", + "Unknown artifact type: %s\nCan only import from Docker, Qemu builder artifacts.", artifact.BuilderId()) return nil, false, err } From 8a9a59c3ca49c24b19f3b3a8f88f367a24e9660b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 09:11:29 -0500 Subject: [PATCH 528/956] check if newDevice.EBS is nil --- builder/amazon/chroot/step_register_ami.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 0074cfbbe..09df1e13d 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -34,7 +34,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // assume working from a snapshot, so we unset the Encrypted field if set, // otherwise AWS API will return InvalidParameter - if newDevice.EBS.Encrypted != nil { + if newDevice.EBS != nil && newDevice.EBS.Encrypted != nil { newDevice.EBS.Encrypted = nil } From 5d2ea088a2f2de56224cf400784b2fb928d35cfb Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 10:35:59 -0500 Subject: [PATCH 529/956] builder/amazon-chroot: add root_volume_size to resize chroot root volume --- builder/amazon/chroot/builder.go | 9 +++++++-- builder/amazon/chroot/step_create_volume.go | 13 ++++++++++--- builder/amazon/chroot/step_register_ami.go | 8 +++++++- 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index a5ac070d4..c5449a60a 100644 --- a/builder/amazon/chroot/builder.go +++
b/builder/amazon/chroot/builder.go @@ -34,6 +34,7 @@ type Config struct { DevicePath string `mapstructure:"device_path"` MountPath string `mapstructure:"mount_path"` SourceAmi string `mapstructure:"source_ami"` + RootVolumeSize int64 `mapstructure:"root_volume_size"` ctx interpolate.Context } @@ -159,7 +160,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepCheckRootDevice{}, &StepFlock{}, &StepPrepareDevice{}, - &StepCreateVolume{}, + &StepCreateVolume{ + RootVolumeSize: b.config.RootVolumeSize, + }, &StepAttachVolume{}, &StepEarlyUnflock{}, &StepMountDevice{}, @@ -172,7 +175,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ForceDeregister: b.config.AMIForceDeregister, AMIName: b.config.AMIName, }, - &StepRegisterAMI{}, + &StepRegisterAMI{ + RootVolumeSize: b.config.RootVolumeSize, + }, &awscommon.StepAMIRegionCopy{ AccessConfig: &b.config.AccessConfig, Regions: b.config.AMIRegions, diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 81486f5e2..40925483a 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -4,6 +4,8 @@ import ( "fmt" "log" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -16,7 +18,8 @@ import ( // Produces: // volume_id string - The ID of the created volume type StepCreateVolume struct { - volumeId string + volumeId string + RootVolumeSize int64 } func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { @@ -43,14 +46,18 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } ui.Say("Creating the root volume...") + vs := *rootDevice.EBS.VolumeSize + if s.RootVolumeSize > *rootDevice.EBS.VolumeSize { + vs = s.RootVolumeSize + } createVolume := &ec2.CreateVolumeInput{ AvailabilityZone: instance.Placement.AvailabilityZone, - Size: rootDevice.EBS.VolumeSize, + Size: aws.Long(vs), SnapshotID: rootDevice.EBS.SnapshotID, VolumeType: rootDevice.EBS.VolumeType, IOPS: rootDevice.EBS.IOPS, } - log.Printf("Create args: %#v", createVolume) + log.Printf("Create args: %s", awsutil.StringValue(createVolume)) createVolumeResp, err := ec2conn.CreateVolume(createVolume) if err != nil { diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 09df1e13d..5314ef0a1 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -11,7 +11,9 @@ import ( ) // StepRegisterAMI creates the AMI. 
-type StepRegisterAMI struct{} +type StepRegisterAMI struct { + RootVolumeSize int64 +} func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { config := state.Get("config").(*Config) @@ -30,6 +32,10 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } else { newDevice.EBS = &ec2.EBSBlockDevice{SnapshotID: aws.String(snapshotId)} } + + if s.RootVolumeSize > *newDevice.EBS.VolumeSize { + newDevice.EBS.VolumeSize = aws.Long(s.RootVolumeSize) + } } // assume working from a snapshot, so we unset the Encrypted field if set, From d429b75fccf5ea1332d0536172930f83ba10eafe Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 08:39:57 -0700 Subject: [PATCH 530/956] post-processor/vagrant: fix interpolation --- post-processor/vagrant/post-processor.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/post-processor/vagrant/post-processor.go b/post-processor/vagrant/post-processor.go index bc39c27fa..dd0a4fda2 100644 --- a/post-processor/vagrant/post-processor.go +++ b/post-processor/vagrant/post-processor.go @@ -76,13 +76,12 @@ func (p *PostProcessor) PostProcessProvider(name string, provider Provider, ui p ui.Say(fmt.Sprintf("Creating Vagrant box for '%s' provider", name)) - outputPath, err := interpolate.Render(config.OutputPath, &interpolate.Context{ - Data: &outputPathTemplate{ - ArtifactId: artifact.Id(), - BuildName: config.PackerBuildName, - Provider: name, - }, - }) + config.ctx.Data = &outputPathTemplate{ + ArtifactId: artifact.Id(), + BuildName: config.PackerBuildName, + Provider: name, + } + outputPath, err := interpolate.Render(config.OutputPath, &config.ctx) if err != nil { return nil, false, err } From 44dc977cb8842a21f9cb3c7b62ae2ee03c34ad72 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 10:52:42 -0500 Subject: [PATCH 531/956] document root_volume_size for amazon-chroot --- website/source/docs/builders/amazon-chroot.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index 637153dab..60b6eb008 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -133,6 +133,9 @@ AMI if one with the same name already exists. Default `false`. template where the `.Device` variable is replaced with the name of the device where the volume is attached. +* `root_volume_size` (integer) – The size of the root volume for the chroot +environment, and the resulting AMI. + * `tags` (object of key/value strings) - Tags applied to the AMI.
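For illustration, a hypothetical `amazon-chroot` template fragment using the new option could look like the following; every value here is a placeholder rather than a default:

```javascript
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "source_ami": "ami-e81d5881",
  "ami_name": "packer-amazon-chroot {{timestamp}}",
  "root_volume_size": 20
}
```

As the `step_create_volume.go` change above shows, the requested size only takes effect when it is larger than the source AMI's root volume; otherwise the source volume's size wins.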
## Basic Example From 29cef0eae41f33e3d3b40a54b533768aee1c99be Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 11:26:13 -0500 Subject: [PATCH 532/956] builder/amazon-chroot: add mount_options configuration option --- builder/amazon/chroot/builder.go | 5 ++++- builder/amazon/chroot/step_mount_device.go | 13 +++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index c5449a60a..636de8df7 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -35,6 +35,7 @@ type Config struct { MountPath string `mapstructure:"mount_path"` SourceAmi string `mapstructure:"source_ami"` RootVolumeSize int64 `mapstructure:"root_volume_size"` + MountOptions []string `mapstructure:"mount_options"` ctx interpolate.Context } @@ -165,7 +166,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &StepAttachVolume{}, &StepEarlyUnflock{}, - &StepMountDevice{}, + &StepMountDevice{ + MountOptions: b.config.MountOptions, + }, &StepMountExtra{}, &StepCopyFiles{}, &StepChrootProvision{}, diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index cf10535df..8f7d27485 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -6,6 +6,7 @@ import ( "log" "os" "path/filepath" + "strings" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" @@ -23,7 +24,8 @@ type mountPathData struct { // mount_path string - The location where the volume was mounted. // mount_device_cleanup CleanupFunc - To perform early cleanup type StepMountDevice struct { - mountPath string + mountPath string + MountOptions []string } func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { @@ -70,8 +72,15 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Mounting the root device...") stderr := new(bytes.Buffer) + + // build mount options from mount_options config, useful for nouuid options + // or other specific device type settings for mount + opts := "" + if len(s.MountOptions) > 0 { + opts = "-o " + strings.Join(s.MountOptions, " -o ") + } mountCommand, err := wrappedCommand( - fmt.Sprintf("mount %s %s", deviceMount, mountPath)) + fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath)) if err != nil { err := fmt.Errorf("Error creating mount command: %s", err) state.Put("error", err) From 24106de3780ddb1dddf8b4a76809460329dcde54 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 11:31:43 -0500 Subject: [PATCH 533/956] builder/amazon-chroot: document mount_options config --- website/source/docs/builders/amazon-chroot.html.markdown | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index 60b6eb008..e0553ef7a 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -133,6 +133,12 @@ AMI if one with the same name already exists. Default `false`. template where the `.Device` variable is replaced with the name of the device where the volume is attached. +* `mount_options` (array of strings) – Options to supply the `mount` command +when mounting devices. Each option will be prefixed with `-o ` and supplied to +the `mount` command run by Packer. Because this command is run in a shell, user
discretion is advised. See [this manual page for the mount command][1] for valid +file system specific options. + * `root_volume_size` (integer) – The size of the root volume for the chroot +environment, and the resulting AMI. + * `tags` (object of key/value strings) - Tags applied to the AMI. ## Basic Example From 3a54e6899ded7f6db6ff5bce27f44e2f472d5cf3 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 23 Jun 2015 11:34:42 -0500 Subject: [PATCH 534/956] code cleanup --- builder/amazon/chroot/step_mount_device.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index 8f7d27485..cb022e36c 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -24,8 +24,9 @@ type mountPathData struct { // mount_path string - The location where the volume was mounted. // mount_device_cleanup CleanupFunc - To perform early cleanup type StepMountDevice struct { - mountPath string MountOptions []string + + mountPath string } func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction { From 230691bb2fe83c4a1f60cc71b2cbf8a0ba298b3f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 09:34:47 -0700 Subject: [PATCH 535/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39ba7962e..af2658bee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ IMPROVEMENTS: * builder/openstack: Machine will be stopped prior to imaging if the cluster supports the `startstop` extension. [GH-2223] * builder/openstack: Support for user data [GH-2224] + * builder/qemu: Default accelerator to "tcg" on Windows [GH-2291] * builder/virtualbox: Added option: `ssh_skip_nat_mapping` to skip the automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support From c8d451dda230c77648b9b866c343e9f3ebc47013 Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 23 Jun 2015 12:11:38 -0500 Subject: [PATCH 536/956] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index af2658bee..b12f1d4d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,10 @@ IMPROVEMENTS: * builder/amazon: Now applies tags to EBS snapshots [GH-2212] * builder/amazon: Clean up orphaned volumes from Source AMIs [GH-1783] * builder/amazon: Support custom keypairs [GH-1837] + * builder/amazon-chroot: Can now resize the root volume of the resulting + AMI with the `root_volume_size` option [GH-2289] + * builder/amazon-chroot: Add `mount_options` configuration option for providing + options to the `mount` command [GH-2296] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/googlecompute: Option to use internal IP for connections.
[GH-2152] From 88e689faa22605759ee91c22bad9a5f463fcf78a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 10:59:34 -0700 Subject: [PATCH 537/956] v0.8.0 --- CHANGELOG.md | 4 ++-- version.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b12f1d4d7..e323fe818 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.8.0 (unreleased) +## 0.8.0 (June 23, 2015) BACKWARDS INCOMPATIBILITIES: @@ -58,7 +58,7 @@ IMPROVEMENTS: * builder/amazon: Now applies tags to EBS snapshots [GH-2212] * builder/amazon: Clean up orphaned volumes from Source AMIs [GH-1783] * builder/amazon: Support custom keypairs [GH-1837] - * builder/amazon-chroot: Can now resize the root volume of the resulting + * builder/amazon-chroot: Can now resize the root volume of the resulting AMI with the `root_volume_size` option [GH-2289] * builder/amazon-chroot: Add `mount_options` configuration option for providing options to the `mount` command [GH-2296] diff --git a/version.go b/version.go index dbfc8da12..cfe8f5e6b 100644 --- a/version.go +++ b/version.go @@ -9,4 +9,4 @@ const Version = "0.8.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From b06158aeeabfa9207f471cd62a34605c6cb9a4a8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:01:02 -0700 Subject: [PATCH 538/956] website: update homepage copy --- website/Gemfile.lock | 3 --- website/source/index.html.erb | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/website/Gemfile.lock b/website/Gemfile.lock index b49383f77..625e13326 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -175,6 +175,3 @@ PLATFORMS DEPENDENCIES middleman-hashicorp! - -BUNDLED WITH - 1.10.2 diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 6c6ddc5dd..6d38bb645 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -10,7 +10,7 @@ description: |-

      - Packer is a tool for creating identical machine images for multiple platforms from a single source configuration. + Packer is a tool for creating machine and container images for multiple platforms from a single source configuration.

      @@ -58,7 +58,7 @@ description: |-

      Works Great With

      Out of the box Packer comes with support to build images for - Amazon EC2, DigitalOcean, Google Compute Engine, QEMU, + Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, VirtualBox, VMware, and more. Support for more platforms is on the way, and anyone can add new platforms via plugins. From 0ed15e039af45934935b7a638b03cb53138cff2e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:02:29 -0700 Subject: [PATCH 539/956] website: fix the abnormally large image sizes --- website/source/assets/stylesheets/_components.scss | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/assets/stylesheets/_components.scss b/website/source/assets/stylesheets/_components.scss index c17c9dc1b..72af67f90 100644 --- a/website/source/assets/stylesheets/_components.scss +++ b/website/source/assets/stylesheets/_components.scss @@ -321,6 +321,10 @@ header .header { .person { margin-bottom: 30px; + img { + width: 125px; + } + h3 { text-transform: none; } From 4244a6ce8de66ac81d9b68887b0f342c76f419b1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:39:29 -0700 Subject: [PATCH 540/956] website: communicator section --- .../docs/templates/builders.html.markdown | 12 ++ .../docs/templates/communicator.html.md | 103 ++++++++++++++++++ website/source/layouts/docs.erb | 1 + 3 files changed, 116 insertions(+) create mode 100644 website/source/docs/templates/communicator.html.md diff --git a/website/source/docs/templates/builders.html.markdown b/website/source/docs/templates/builders.html.markdown index 0226d30d5..2afb0a95c 100644 --- a/website/source/docs/templates/builders.html.markdown +++ b/website/source/docs/templates/builders.html.markdown @@ -61,3 +61,15 @@ you can specify a custom name using the `name` key within the builder definition This is particularly useful if you have multiple builds defined that use the same underlying builder. In this case, you must specify a name for at least one of them since the names must be unique. + +## Communicators + +Every build is associated with a single +[communicator](/docs/templates/communicator.html). Communicators are +used to establish a connection for provisioning a remote machine (such +as an AWS instance or local virtual machine). + +All the examples for the various builders show some communicator (usually +SSH), but the communicators are highly customizable so we recommend +reading the +[communicator documentation](/docs/templates/communicator.html). diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md new file mode 100644 index 000000000..fbbe79343 --- /dev/null +++ b/website/source/docs/templates/communicator.html.md @@ -0,0 +1,103 @@ +--- +layout: "docs" +page_title: "Templates: Communicators" +description: |- + Communicators are the mechanism Packer uses to upload files, execute scripts, etc. with the machine being created. +--- + +# Templates: Communicators + +Communicators are the mechanism Packer uses to upload files, execute +scripts, etc. with the machine being created. + +Communicators are configured within the [builder](/docs/templates/builders.html) +section. Packer currently supports three kinds of communicators: + + * `none` - No communicator will be used. If this is set, most provisioners + also can't be used. + + * `ssh` - An SSH connection will be established to the machine. This is + usually the default. + + * `winrm` - A WinRM connection will be established. 
+ +In addition to the above, some builders have custom communicators they can +use. For example, the Docker builder has a "docker" communicator that uses +`docker exec` and `docker cp` to execute scripts and copy files. + +## Using a Communicator + +By default, the SSH communicator is usually used. Additional configuration +may not even be necessary, since some builders such as Amazon automatically +configure everything. + +However, to specify a communicator, you set the `communicator` key within +a build. Multiple builds can have different communicators. Example: + +```javascript +{ + "builders": [{ + "type": "amazon-ebs", + "communicator": "ssh" + }] +} +``` + +After specifying the `communicator`, you can specify a number of other +configuration parameters for that communicator. These are documented below. + +## SSH Communicator + +The SSH communicator has the following options: + + * `ssh_host` (string) - The address to SSH to. This usually is automatically + configured by the builder. + + * `ssh_port` (int) - The port to connect to SSH. This defaults to 22. + + * `ssh_username` (string) - The username to connect to SSH with. + + * `ssh_password` (string) - A plaintext password to use to authenticate + with SSH. + + * `ssh_private_key_file` (string) - Path to a PEM encoded private key + file to use to authenticate with SSH. + + * `ssh_pty` (bool) - If true, a PTY will be requested for the SSH connection. + This defaults to false. + + * `ssh_timeout` (string) - The time to wait for SSH to become available. + Packer uses this to determine when the machine has booted so this is usually + quite long. Example value: "10m" + + * `ssh_handshake_attempts` (int) - The number of handshakes to attempt with + SSH once it can connect. This defaults to 10. + + * `ssh_bastion_host` (string) - A bastion host to use for the actual + SSH connection. + + * `ssh_bastion_port` (int) - The port of the bastion host. Defaults to 22. + + * `ssh_bastion_username` (string) - The username to connect to the bastion host. + + * `ssh_bastion_password` (string) - The password to use to authenticate + with the bastion host. + + * `ssh_bastion_private_key_file` (string) - A private key file to use + to authenticate with the bastion host. + +## WinRM Communicator + +The WinRM communicator has the following options: + + * `winrm_host` (string) - The address for WinRM to connect to. + + * `winrm_port` (int) - The WinRM port to connect to. This defaults to 5985. + + * `winrm_username` (string) - The username to use to connect to WinRM. + + * `winrm_password` (string) - The password to use to connect to WinRM. + + * `winrm_timeout` (string) - The amount of time to wait for WinRM to + become available. This defaults to "30m" since setting up a Windows + machine generally takes a long time. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index b4101422c..d0c331b1f 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -25,6 +25,7 @@

    • Provisioners
    • Post-Processors
    • Push
    • +
    • Communicators
    • Configuration Templates
    • User Variables
    • Veewee-to-Packer
    • From 3cc90768a2509a2046aaba1b646fa6d3c814048e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:44:57 -0700 Subject: [PATCH 541/956] website: update docs to link to communicator --- .../docs/builders/amazon-chroot.html.markdown | 4 +++ .../docs/builders/amazon-ebs.html.markdown | 19 +++++--------- .../builders/amazon-instance.html.markdown | 15 +++-------- .../docs/builders/digitalocean.html.markdown | 14 +++------- .../source/docs/builders/docker.html.markdown | 4 +++ .../docs/builders/googlecompute.markdown | 15 +++++------ .../source/docs/builders/null.html.markdown | 21 ++------------- .../docs/builders/openstack.html.markdown | 14 +++------- .../docs/builders/parallels-iso.html.markdown | 19 +++----------- .../docs/builders/parallels-pvm.html.markdown | 19 +++----------- .../source/docs/builders/qemu.html.markdown | 21 +++------------ .../builders/virtualbox-iso.html.markdown | 19 +++----------- .../builders/virtualbox-ovf.html.markdown | 19 +++----------- .../docs/builders/vmware-iso.html.markdown | 26 +++---------------- .../docs/builders/vmware-vmx.html.markdown | 23 +++------------- 15 files changed, 62 insertions(+), 190 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index e0553ef7a..453811c8a 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -51,6 +51,10 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `access_key` (string) - The access key used to communicate with AWS. diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index b7f16eef9..596f8d230 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -31,6 +31,10 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `access_key` (string) - The access key used to communicate with AWS. @@ -154,20 +158,9 @@ AMI if one with the same name already exists. Default `false`. generate a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_port` (integer) - The port that SSH will be available on. This defaults - to port 22. - -* `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must - already exist on the `source_ami` - * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. -* `ssh_timeout` (string) - The time to wait for SSH to become available - before timing out. The format of this value is a duration such as "5s" - or "5m". The default SSH timeout is "5m", or five minutes. - * `subnet_id` (string) - If using VPC, the ID of the subnet, such as "subnet-12345def", where Packer will launch the EC2 instance. 
This field is required if you are using a non-default VPC. @@ -279,11 +272,11 @@ Here is an example using the optional AMI tags. This will add the tags ``` -> **Note:** Packer uses pre-built AMIs as the source for building images. -These source AMIs may include volumes that are not flagged to be destroyed on +These source AMIs may include volumes that are not flagged to be destroyed on termination of the instance building the new image. Packer will attempt to clean up all residual volumes that are not designated by the user to remain after termination. If you need to preserve those source volumes, you can overwrite the -termination setting by specifying `delete_on_termination=false` in the +termination setting by specifying `delete_on_termination=false` in the `launch_device_mappings` block for the device. [1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index bacb5ee58..163a93584 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -36,6 +36,10 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `access_key` (string) - The access key used to communicate with AWS. @@ -193,20 +197,9 @@ AMI if one with the same name already exists. Default `false`. generate a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_port` (integer) - The port that SSH will be available on. This defaults - to port 22. -* `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must - already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. -* `ssh_timeout` (string) - The time to wait for SSH to become available - before timing out. The format of this value is a duration such as "5s" - or "5m". The default SSH timeout is "5m", or five minutes. * `subnet_id` (string) - If using VPC, the ID of the subnet, such as "subnet-12345def", where Packer will launch the EC2 instance. This field is required if you are using a non-default VPC. diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index 829424e3d..c9ef3b315 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -24,6 +24,10 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `api_token` (string) - The client TOKEN to use to access your account. @@ -53,16 +57,6 @@ each category, the available configuration keys are alphabetized.
To help make this unique, use a function like `timestamp` (see [configuration templates](/docs/templates/configuration-templates.html) for more info) -* `ssh_port` (integer) - The port that SSH will be available on. Defaults to port - 22. - -* `ssh_timeout` (string) - The time to wait for SSH to become available - before timing out. The format of this value is a duration such as "5s" - or "5m". The default SSH timeout is "1m". - -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running droplet. Default is "root". - * `state_timeout` (string) - The time to wait, as a duration string, for a droplet to enter a desired state (such as "active") before timing out. The default state timeout is "6m". diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index 78ca90a56..c760742ee 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -62,6 +62,10 @@ Configuration options are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `commit` (boolean) - If true, the container will be committed to an diff --git a/website/source/docs/builders/googlecompute.markdown b/website/source/docs/builders/googlecompute.markdown index 097e69268..a572a0371 100644 --- a/website/source/docs/builders/googlecompute.markdown +++ b/website/source/docs/builders/googlecompute.markdown @@ -15,7 +15,7 @@ Compute Engine doesn't allow the creation of images from scratch. ## Authentication -Authenticating with Google Cloud services requires at most one JSON file, +Authenticating with Google Cloud services requires at most one JSON file, called the _account file_. The _account file_ is **not** required if you are running the `googlecompute` Packer builder from a GCE instance with a properly-configured [Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). @@ -23,7 +23,7 @@ the `googlecompute` Packer builder from a GCE instance with a properly-configure ### Running With a Compute Engine Service Account If you run the `googlecompute` Packer builder from a GCE instance, you can configure that instance to use a [Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). This will allow Packer to authenticate -to Google Cloud without having to bake in a separate credential/authentication file. +to Google Cloud without having to bake in a separate credential/authentication file. To create a GCE instance that uses a service account, provide the required scopes when launching the instance. @@ -85,6 +85,10 @@ existing GCE image. The account file is obtained in the previous section. Configuration options are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `project_id` (string) - The project ID that will be used to launch instances @@ -120,13 +124,6 @@ each category, the available options are alphabetized and described. * `network` (string) - The Google Compute network to use for the launched instance. Defaults to `"default"`. 
-* `ssh_port` (integer) - The SSH port. Defaults to `22`. - -* `ssh_timeout` (string) - The time to wait for SSH to become available. - Defaults to `"1m"`. - -* `ssh_username` (string) - The SSH username. Defaults to `"root"`. - * `state_timeout` (string) - The time to wait for instance state changes. Defaults to `"5m"`. diff --git a/website/source/docs/builders/null.html.markdown b/website/source/docs/builders/null.html.markdown index 1229db359..74c4465fb 100644 --- a/website/source/docs/builders/null.html.markdown +++ b/website/source/docs/builders/null.html.markdown @@ -29,23 +29,6 @@ no provisioners are defined, but it will connect to the specified host via ssh. ## Configuration Reference -Configuration options are organized into two categories: required and -optional. Within each category, the available options are alphabetized and -described. - -### Required: - -* `host` (string) - The hostname or IP address to connect to. - -* `ssh_password` (string) - The password to be used for the ssh connection. - Cannot be combined with ssh_private_key_file. - -* `ssh_private_key_file` (string) - The filename of the ssh private key to be - used for the ssh connection. E.g. /home/user/.ssh/identity_rsa. - -* `ssh_username` (string) - The username to be used for the ssh connection. - -### Optional: - -* `port` (integer) - ssh port to connect to, defaults to 22. +The null builder has no configuration parameters other than the +[communicator](/docs/templates/communicator.html) settings. diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index fcd210dec..fec1a85a6 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -27,6 +27,10 @@ There are many configuration options available for the builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `flavor` (string) - The ID, name, or full URL for the desired flavor for the @@ -82,16 +86,6 @@ each category, the available configuration keys are alphabetized. If not specified, Packer will use the environment variable `OS_REGION_NAME`, if set. -* `ssh_port` (integer) - The port that SSH will be available on. Defaults to port - 22. - -* `ssh_timeout` (string) - The time to wait for SSH to become available - before timing out. The format of this value is a duration such as "5s" - or "1m". The default SSH timeout is "5m". - -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running server. The default is "root". - * `ssh_interface` (string) - The type of interface to connect via SSH. Values useful for Rackspace are "public" or "private", and the default behavior is to connect via whichever is returned first from the OpenStack API. diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index b7e2af527..f0192b301 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -50,6 +50,10 @@ There are many configuration options available for the Parallels builder. They are organized below into two categories: required and optional. 
Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO @@ -190,21 +194,6 @@ each category, the available options are alphabetized and described. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will be listening on in the guest - virtual machine. By default this is 22. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `vm_name` (string) - This is the name of the PVM directory for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index fea19fa83..4083a57fd 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -46,6 +46,10 @@ There are many configuration options available for the Parallels builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `source_path` (string) - The path to a PVM directory that acts as @@ -133,21 +137,6 @@ each category, the available options are alphabetized and described. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will be listening on in the guest - virtual machine. By default this is 22. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `vm_name` (string) - This is the name of the virtual machine when it is imported as well as the name of the PVM directory when the virtual machine is exported. 
By default this is "packer-BUILDNAME", where "BUILDNAME" is diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 0e22ccc3b..ae5d4464f 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -73,6 +73,10 @@ There are many configuration options available for the Qemu builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO @@ -241,23 +245,6 @@ qemu-system-x86 command. The arguments are all printed for review. Packer will choose a randomly available port in this range to use as the host port. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will be listening on in the guest - virtual machine. By default this is 22. The Qemu builder will map, via - port forward, a port on the host machine to the port listed here so - machines outside the installing VM can access the VM. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 07204a33c..616b0a8e4 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -48,6 +48,10 @@ There are many configuration options available for the VirtualBox builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO @@ -196,21 +200,6 @@ each category, the available options are alphabetized and described. Packer will choose a randomly available port in this range to use as the host port. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will be listening on in the guest - virtual machine. By default this is 22. 
- -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does not setup forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index 9635d0e60..db5247738 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -54,6 +54,10 @@ There are many configuration options available for the VirtualBox builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `source_path` (string) - The path to an OVF or OVA file that acts as @@ -171,21 +175,6 @@ each category, the available options are alphabetized and described. Packer will choose a randomly available port in this range to use as the host port. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will be listening on in the guest - virtual machine. By default this is 22. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does not setup forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 177bfc608..2bb3a402f 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -49,6 +49,10 @@ There are many configuration options available for the VMware builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO @@ -201,28 +205,6 @@ each category, the available options are alphabetized and described. slightly larger. If you find this to be the case, you can disable compaction using this configuration value. -* `ssh_host` (string) - Hostname or IP address of the host. By default, DHCP - is used to connect to the host and this field is not used. - -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). 
- The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will listen on within the virtual - machine. By default this is 22. - -* `ssh_skip_request_pty` (boolean) - If true, a pty will not be requested as - part of the SSH connection. By default, this is "false", so a pty - _will_ be requested. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload into the VM. Valid values are "darwin", "linux", and "windows". By default, this is empty, which means VMware tools won't be uploaded. diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown index bbdd8925c..e28ea3f89 100644 --- a/website/source/docs/builders/vmware-vmx.html.markdown +++ b/website/source/docs/builders/vmware-vmx.html.markdown @@ -44,6 +44,10 @@ There are many configuration options available for the VMware builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. +In addition to the options listed here, a +[communicator](/docs/templates/communicator.html) +can be configured for this builder. + ### Required: * `source_path` (string) - Path to the source VMX file to clone. @@ -126,25 +130,6 @@ each category, the available options are alphabetized and described. slightly larger. If you find this to be the case, you can disable compaction using this configuration value. -* `ssh_key_path` (string) - Path to a private key to use for authenticating - with SSH. By default this is not set (key-based auth won't be used). - The associated public key is expected to already be configured on the - VM being prepared by some other process (kickstart, etc.). - -* `ssh_password` (string) - The password for `ssh_username` to use to - authenticate with SSH. By default this is the empty string. - -* `ssh_port` (integer) - The port that SSH will listen on within the virtual - machine. By default this is 22. - -* `ssh_skip_request_pty` (boolean) - If true, a pty will not be requested as - part of the SSH connection. By default, this is "false", so a pty - _will_ be requested. - -* `ssh_wait_timeout` (string) - The duration to wait for SSH to become - available. By default this is "20m", or 20 minutes. Note that this should - be quite long since the timer begins as soon as the virtual machine is booted. - * `vm_name` (string) - This is the name of the VMX file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. 
From c0bc74990beddd2efafa217d2e2b45a163f11f94 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:49:36 -0700 Subject: [PATCH 542/956] website: note SSH forwarding --- website/source/docs/templates/communicator.html.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index fbbe79343..438983c98 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -48,6 +48,10 @@ configuration parameters for that communicator. These are documented below. ## SSH Communicator +The SSH communicator connects to the host via SSH. If you have an SSH +agent enabled on the machine running Packer, it will automatically forward +the SSH agent to the remote host. + The SSH communicator has the following options: * `ssh_host` (string) - The address to SSH to. This usually is automatically From 32f469dc9a671ec4dbaae2890dabb1b1410f0b5b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:49:56 -0700 Subject: [PATCH 543/956] up version fo dev --- CHANGELOG.md | 4 ++++ version.go | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e323fe818..cb82d81a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.8.1 (unreleased) + + + ## 0.8.0 (June 23, 2015) BACKWARDS INCOMPATIBILITIES: diff --git a/version.go b/version.go index cfe8f5e6b..e4b31afba 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.0" +const Version = "0.8.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From a019575026fd8bec6f96c82eb76fb7f698ffcd19 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 14:52:37 -0700 Subject: [PATCH 544/956] helper/communicator: support disabling SSH agent --- communicator/ssh/communicator.go | 8 ++++++++ helper/communicator/config.go | 1 + helper/communicator/step_connect_ssh.go | 7 ++++--- website/source/docs/templates/communicator.html.md | 2 ++ 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 2cc299b30..d6f00351d 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -37,6 +37,9 @@ type Config struct { // Pty, if true, will request a pty from the remote end. Pty bool + + // DisableAgent, if true, will not forward the SSH agent. + DisableAgent bool } // Creates a new packer.Communicator implementation over SSH. 
This takes @@ -287,6 +290,11 @@ func (c *comm) connectToAgent() { return } + if c.config.DisableAgent { + log.Printf("[INFO] SSH agent forwarding is diabled.") + return + } + // open connection to the local agent socketLocation := os.Getenv("SSH_AUTH_SOCK") if socketLocation == "" { diff --git a/helper/communicator/config.go b/helper/communicator/config.go index e3da09618..0f19c4e68 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -22,6 +22,7 @@ type Config struct { SSHPrivateKey string `mapstructure:"ssh_private_key_file"` SSHPty bool `mapstructure:"ssh_pty"` SSHTimeout time.Duration `mapstructure:"ssh_timeout"` + SSHDisableAgent bool `mapstructure:"ssh_disable_agent"` SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"` SSHBastionHost string `mapstructure:"ssh_bastion_host"` SSHBastionPort int `mapstructure:"ssh_bastion_port"` diff --git a/helper/communicator/step_connect_ssh.go b/helper/communicator/step_connect_ssh.go index fd6b585f8..0d302f779 100644 --- a/helper/communicator/step_connect_ssh.go +++ b/helper/communicator/step_connect_ssh.go @@ -158,9 +158,10 @@ func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, cancel <-chan stru // Then we attempt to connect via SSH config := &ssh.Config{ - Connection: connFunc, - SSHConfig: sshConfig, - Pty: s.Config.SSHPty, + Connection: connFunc, + SSHConfig: sshConfig, + Pty: s.Config.SSHPty, + DisableAgent: s.Config.SSHDisableAgent, } log.Println("[INFO] Attempting SSH connection...") diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 438983c98..8a450ac50 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -77,6 +77,8 @@ The SSH communicator has the following options: * `ssh_handshake_attempts` (int) - The number of handshakes to attempt with SSH once it can connect. This defaults to 10. + * `ssh_disable_agent` (bool) - If true, SSH agent forwarding will be disabled. + * `ssh_bastion_host` (string) - A bastion host to use for the actual SSH connection. From 49664ca9c677f15605f49036efc0ddd163e270db Mon Sep 17 00:00:00 2001 From: "John (Jack) Brown" Date: Tue, 23 Jun 2015 22:10:04 -0700 Subject: [PATCH 545/956] Fix DigitalOcean section of getting started docs. This updates the example for adding DigitalOcean as a provider in the beginning tutorial. It looks like it hadn't been updated in some time, and was using old settings. I've updated it and confirmed it works. --- .../parallel-builds.html.markdown | 54 +++++++++++++++---- 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 8a125719e..700f03c28 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -49,9 +49,9 @@ you're not okay with this, just follow along. !> **Warning!** You _will_ be charged $0.01 by DigitalOcean per image created with Packer because of the time the "droplet" is running. -Once you sign up for an account, grab your client ID and API key from -the [DigitalOcean API access page](https://www.digitalocean.com/api_access). -Save these values somewhere, you'll need them in a second. +Once you sign up for an account, grab your API token from +the [DigitalOcean API access page](https://cloud.digitalocean.com/settings/applications). 
+Save these values somewhere; you'll need them in a second. ## Modifying the Template @@ -62,8 +62,10 @@ array. ```javascript { "type": "digitalocean", - "api_key": "{{user `do_api_key`}}", - "client_id": "{{user `do_client_id`}}" + "api_token": "{{user `do_api_token`}}", + "image": "ubuntu-14-04-x64", + "region": "nyc3", + "size": "512mb", } ``` @@ -72,13 +74,46 @@ to include the access keys for DigitalOcean. ```javascript "variables": { - "do_api_key": "", - "do_client_id": "" + "do_api_token": "", // ... } ``` -The entire template should now [look like this](https://gist.github.com/pearkes/cc5f8505eee5403a43a6). +The entire template should now look like this: + +```javascript +{ + "variables": { + "aws_access_key": "", + "aws_secret_key": "", + "do_api_token": "" + }, + "builders": [{ + "type": "amazon-ebs", + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "us-east-1", + "source_ami": "ami-c65be9ae", + "instance_type": "t1.micro", + "ssh_username": "ubuntu", + "ami_name": "packer-example {{timestamp}}" + },{ + "type": "digitalocean", + "api_token": "{{user `do_api_token`}}", + "image": "ubuntu-14-04-x64", + "region": "nyc3", + "size": "512mb" + }], + "provisioners": [{ + "type": "shell", + "inline": [ + "sleep 30", + "sudo apt-get update", + "sudo apt-get install -y redis-server" + ] + }] +} +``` Additional builders are simply added to the `builders` array in the template. This tells Packer to build multiple images. The builder `type` values don't @@ -104,8 +139,7 @@ same. $ packer build \ -var 'aws_access_key=YOUR ACCESS KEY' \ -var 'aws_secret_key=YOUR SECRET KEY' \ - -var 'do_api_key=YOUR API KEY' \ - -var 'do_client_id=YOUR CLIENT ID' \ + -var 'do_api_token=YOUR API TOKEN' \ example.json ==> amazon-ebs: amazon-ebs output will be in this color. ==> digitalocean: digitalocean output will be in this color. From 18438cf291f12bdc1d4341ceb7fcc44395a7835a Mon Sep 17 00:00:00 2001 From: "Schreiter, Wulf-Thilo" Date: Wed, 24 Jun 2015 13:46:59 +0200 Subject: [PATCH 546/956] Add knife config by template Since the chef-client provisioner is cleaning the node and client at the chef-server from the provisioned node it needs to have a flexible configuration This is replacing the used knife flags: -s '' -k '/tmp/packer-chef-client/client.pem' -u '' and puts their values into a generated knife.rb Additionally the knife.rb may include the optional ssl_verify_mode attribute to enable the verify mode verify_none Background: When deleting node and client to a self-hosted chef-server using self signed cerfiticates the usage of knife node delete -y -s '' -k '/tmp/packer-chef-client/client.pem' -u '' will lead into a ssl verification failure. The error output of the knife call is somthing like: 2015/06/24 12:29:17 ui: docker: WARNING: No knife configuration file found docker: WARNING: No knife configuration file found 2015/06/24 12:29:17 ui: docker: ERROR: SSL Validation failure connecting to host: 172.16.117.63 - SSL_connect returned=1 errno=0 state=SSLv3 read server certificate B: certificate verify failed docker: ERROR: SSL Validation failure connecting to host: 172.16.117.63 - SSL_connect returned=1 errno=0 state=SSLv3 read server certificate B: certificate verify failed 2015/06/24 12:29:17 ui: docker: ERROR: Could not establish a secure connection to the server. docker: ERROR: Could not establish a secure connection to the server. 2015/06/24 12:29:17 ui: docker: Use 'knife ssl check' to troubleshoot your SSL configuration. 
docker: Use 'knife ssl check' to troubleshoot your SSL configuration. 2015/06/24 12:29:17 ui: docker: If your Chef Server uses a self-signed certificate, you can use docker: If your Chef Server uses a self-signed certificate, you can use 2015/06/24 12:29:17 ui: docker: 'knife ssl fetch' to make knife trust the server's certificates. docker: 'knife ssl fetch' to make knife trust the server's certificates. 2015/06/24 12:29:17 ui: docker: docker: 2015/06/24 12:29:17 ui: docker: Original Exception: OpenSSL::SSL::SSLError: SSL_connect returned=1 errno=0 state=SSLv3 read server certificate B: certificate verify failed docker: Original Exception: OpenSSL::SSL::SSLError: SSL_connect returned=1 errno=0 state=SSLv3 read server certificate B: certificate verify failed 2015/06/24 12:29:17 packer-builder-docker: 2015/06/24 12:29:17 Executed command exit status: 100 --- provisioner/chef-client/provisioner.go | 61 +++++++++++++++++++++----- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 4d9430347..498033925 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -187,14 +187,20 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } err = p.executeChef(ui, comm, configPath, jsonPath) + + knifeConfigPath, err2 := p.createKnifeConfig( + ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode) + if err2 != nil { + return fmt.Errorf("Error creating knife config on node: %s", err2) + } if !p.config.SkipCleanNode { - if err2 := p.cleanNode(ui, comm, nodeName); err2 != nil { + if err2 := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err2 != nil { return fmt.Errorf("Error cleaning up chef node: %s", err2) } } if !p.config.SkipCleanClient { - if err2 := p.cleanClient(ui, comm, nodeName); err2 != nil { + if err2 := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err2 != nil { return fmt.Errorf("Error cleaning up chef client: %s", err2) } } @@ -273,6 +279,32 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN return remotePath, nil } +func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string) (string, error) { + ui.Message("Creating configuration file 'knife.rb'") + + // Read the template + tpl := DefaultKnifeTemplate + + ctx := p.config.ctx + ctx.Data = &ConfigTemplate{ + NodeName: nodeName, + ServerUrl: serverUrl, + ClientKey: clientKey, + SslVerifyMode: sslVerifyMode, + } + configString, err := interpolate.Render(tpl, &ctx) + if err != nil { + return "", err + } + + remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb")) + if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil { + return "", err + } + + return remotePath, nil +} + func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) { ui.Message("Creating JSON attribute file") @@ -334,32 +366,30 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri return nil } -func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error { +func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error { ui.Say("Cleaning up chef node...") args := []string{"node", "delete", node} - if err := p.knifeExec(ui, comm, node, args); err != nil { + if err := 
p.knifeExec(ui, comm, node, knifeConfigPath, args); err != nil { return fmt.Errorf("Failed to cleanup node: %s", err) } return nil } -func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error { +func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error { ui.Say("Cleaning up chef client...") args := []string{"client", "delete", node} - if err := p.knifeExec(ui, comm, node, args); err != nil { + if err := p.knifeExec(ui, comm, node, knifeConfigPath, args); err != nil { return fmt.Errorf("Failed to cleanup client: %s", err) } return nil } -func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, args []string) error { +func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string, args []string) error { flags := []string{ "-y", - "-s", fmt.Sprintf("'%s'", p.config.ServerUrl), - "-k", fmt.Sprintf("'%s'", p.config.ClientKey), - "-u", fmt.Sprintf("'%s'", node), + "-c", knifeConfigPath, } cmdText := fmt.Sprintf( @@ -573,3 +603,14 @@ environment "{{.ChefEnvironment}}" ssl_verify_mode :{{.SslVerifyMode}} {{end}} ` + +var DefaultKnifeTemplate = ` +log_level :info +log_location STDOUT +chef_server_url "{{.ServerUrl}}" +client_key "{{.ClientKey}}" +node_name "{{.NodeName}}" +{{if ne .SslVerifyMode ""}} +ssl_verify_mode :{{.SslVerifyMode}} +{{end}} +` From def6f8aefc2025e05ec822875449af1abcbf3705 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 24 Jun 2015 10:28:01 -0500 Subject: [PATCH 547/956] clean up character encodings --- .../docs/builders/amazon-chroot.html.markdown | 6 +++--- .../docs/builders/amazon-ebs.html.markdown | 20 +++++++++---------- .../builders/amazon-instance.html.markdown | 20 +++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index 453811c8a..d6b61ca8b 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -128,7 +128,7 @@ can be configured for this builder. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -* `force_deregister` (boolean) – Force Packer to first deregister an existing +* `force_deregister` (boolean) - Force Packer to first deregister an existing AMI if one with the same name already exists. Default `false`. * `mount_path` (string) - The path where the volume will be mounted. This is @@ -137,13 +137,13 @@ AMI if one with the same name already exists. Default `false`. template where the `.Device` variable is replaced with the name of the device where the volume is attached. -* `mount_options` (array of strings) – Options to supply the `mount` command +* `mount_options` (array of strings) - Options to supply the `mount` command when mounting devices. Each option will be prefixed with `-o ` and supplied to the `mount` command ran by Packer. Because this command is ran in a shell, user discrestion is advised. See [this manual page for the mount command][1] for valid file system specific options -* `root_volume_size` (integer) – The size of the root volume for the chroot +* `root_volume_size` (integer) - The size of the root volume for the chroot environment, and the resulting AMI * `tags` (object of key/value strings) - Tags applied to the AMI. 
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 596f8d230..25a06a957 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -67,22 +67,22 @@ can be configured for this builder. * `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for + - `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping][1] for more information - - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + - `snapshot_id` (string) - The ID of the snapshot + - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) – The size of the volume, in GiB. Required if not + - `volume_size` (integer) - The size of the volume, in GiB. Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) - Indicates whether the EBS volume is deleted on instance termination - - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the + - `encrypted` (boolean) - Indicates whether to encrypt the volume or not + - `no_device` (boolean) - Suppresses the specified device included in the block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `iops` (integer) - The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information @@ -116,7 +116,7 @@ can be configured for this builder. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -* `force_deregister` (boolean) – Force Packer to first deregister an existing +* `force_deregister` (boolean) - Force Packer to first deregister an existing AMI if one with the same name already exists. Default `false`. * `iam_instance_profile` (string) - The name of an diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 163a93584..f670e4b66 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -87,22 +87,22 @@ can be configured for this builder. * `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for + - `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `virtual_name` (string) - The virtual device name. 
See the documentation on [Block Device Mapping][1] for more information - - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + - `snapshot_id` (string) - The ID of the snapshot + - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) – The size of the volume, in GiB. Required if not + - `volume_size` (integer) - The size of the volume, in GiB. Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) - Indicates whether the EBS volume is deleted on instance termination - - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the + - `encrypted` (boolean) - Indicates whether to encrypt the volume or not + - `no_device` (boolean) - Suppresses the specified device included in the block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `iops` (integer) - The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information * `ami_description` (string) - The description to set for the resulting @@ -155,7 +155,7 @@ can be configured for this builder. * `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -* `force_deregister` (boolean) – Force Packer to first deregister an existing +* `force_deregister` (boolean) - Force Packer to first deregister an existing AMI if one with the same name already exists. Default `false`. 
* `iam_instance_profile` (string) - The name of an From 7af5d5419a29a8e150eeb015510a4db98c81c42b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 24 Jun 2015 11:41:55 -0500 Subject: [PATCH 548/956] builder/amazon: Fix issue with sharing AMIs with ami_users --- .../common/step_modify_ami_attributes.go | 5 ++ builder/amazon/ebs/builder_acc_test.go | 70 +++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index 98bcfaf8c..a0c5dccfb 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -54,11 +54,16 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc if len(s.Users) > 0 { users := make([]*string, len(s.Users)) + adds := make([]*ec2.LaunchPermission, len(s.Users)) for i, u := range s.Users { users[i] = &u + adds[i] = &ec2.LaunchPermission{UserID: &u} } options["users"] = &ec2.ModifyImageAttributeInput{ UserIDs: users, + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: adds, + }, } } diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index 1b4de70ce..844b70ccc 100644 --- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -5,6 +5,7 @@ import ( "os" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/builder/amazon/common" builderT "github.com/mitchellh/packer/helper/builder/testing" @@ -44,6 +45,60 @@ func TestBuilderAcc_forceDeregister(t *testing.T) { }) } +func TestBuilderAcc_amiSharing(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: testBuilderAccSharing, + Check: checkAMISharing(1, "932021504756"), + }) +} + +func checkAMISharing(count int, uid string) builderT.TestCheckFunc { + return func(artifacts []packer.Artifact) error { + if len(artifacts) > 1 { + return fmt.Errorf("more than 1 artifact") + } + + // Get the actual *Artifact pointer so we can access the AMIs directly + artifactRaw := artifacts[0] + artifact, ok := artifactRaw.(*common.Artifact) + if !ok { + return fmt.Errorf("unknown artifact: %#v", artifactRaw) + } + + // describe the image, get block devices with a snapshot + ec2conn, _ := testEC2Conn() + imageResp, err := ec2conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{ + Attribute: aws.String("launchPermission"), + ImageID: aws.String(artifact.Amis["us-east-1"]), + }) + + if err != nil { + return fmt.Errorf("Error retrieving Image Attributes for AMI Artifact (%#v) in AMI Sharing Test: %s", artifact, err) + } + + // Launch Permissions are in addition to the userid that created it, so if + // you add 3 additional ami_users, you expect 2 Launch Permissions here + if len(imageResp.LaunchPermissions) != count { + return fmt.Errorf("Error in Image Attributes, expected (%d) Launch Permissions, got (%d)", count, len(imageResp.LaunchPermissions)) + } + + found := false + for _, lp := range imageResp.LaunchPermissions { + if uid == *lp.UserID { + found = true + } + } + + if !found { + return fmt.Errorf("Error in Image Attributes, expected User ID (%s) to have Launch Permissions, but was not found", uid) + } + + return nil + } +} + func checkRegionCopy(regions []string) builderT.TestCheckFunc { return func(artifacts []packer.Artifact) error { if len(artifacts) > 1 { @@ -138,6 +193,21 @@ const 
testBuilderAccForceDeregister = ` } ` +// share with catsby +const testBuilderAccSharing = ` +{ + "builders": [{ + "type": "test", + "region": "us-east-1", + "instance_type": "m3.medium", + "source_ami": "ami-76b2a71e", + "ssh_username": "ubuntu", + "ami_name": "packer-test {{timestamp}}", + "ami_users":["932021504756"] + }] +} +` + func buildForceDeregisterConfig(name, flag string) string { return fmt.Sprintf(testBuilderAccForceDeregister, name, flag) } From f765d570096a10e01fc3733e8bb30ed031186344 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 24 Jun 2015 13:25:08 -0500 Subject: [PATCH 549/956] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb82d81a1..6cb14f96d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ ## 0.8.1 (unreleased) +BUG FIXES: + * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] ## 0.8.0 (June 23, 2015) From aa954c9406f9440d5654ecc1443ac337ef7ae3d3 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 24 Jun 2015 11:58:00 -0700 Subject: [PATCH 550/956] updated how vmx entries are handled --- builder/vmware/common/step_configure_vmx.go | 2 -- builder/vmware/common/vmx.go | 20 ++++++++++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_configure_vmx.go mode change 100644 => 100755 builder/vmware/common/vmx.go diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go old mode 100644 new mode 100755 index 401d53055..14c68e76a --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "log" "regexp" - "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -53,7 +52,6 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) - k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go old mode 100644 new mode 100755 index e7cdb662f..ab0291807 --- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,8 +25,7 @@ func ParseVMX(contents string) map[string]string { continue } - key := strings.ToLower(matches[1]) - results[key] = matches[2] + results[matches[1]] = matches[2] } return results @@ -43,9 +42,22 @@ func EncodeVMX(contents map[string]string) string { i++ } + // a list of VMX key fragments that should not be wrapped in quotes, + // fragments because multiple disks can use the virtualSSD suffix + noQuotes := []string { + "virtualSSD", + } + sort.Strings(keys) for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) + pat := "%s = \"%s\"\n" + for _, q := range noQuotes { + if strings.Contains(k, q) { + pat = "%s = %s\n" + break; + } + } + buf.WriteString(fmt.Sprintf(pat, k, contents[k])) } return buf.String() From 1831a0905525ce9ff148a106ca9eb1d7b81501e4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 24 Jun 2015 17:47:00 -0700 Subject: [PATCH 551/956] Fix compress crash - Changed config 
from pointer to value to fix crash - Removed acceptance flag from compress tests since they would have caught this --- post-processor/compress/post-processor.go | 2 +- post-processor/compress/post-processor_test.go | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 8b70bc456..72b85090b 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -35,7 +35,7 @@ type Config struct { } type PostProcessor struct { - config *Config + config Config } var ( diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index d7bca6c7a..db23cf3b1 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/mitchellh/packer/builder/file" - env "github.com/mitchellh/packer/helper/builder/testing" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template" ) @@ -187,11 +186,6 @@ func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { } func testArchive(t *testing.T, config string) packer.Artifact { - if os.Getenv(env.TestEnvVar) == "" { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) - } - ui, artifact, err := setup(t) if err != nil { t.Fatalf("Error bootstrapping test: %s", err) From eba28519db59f9a5253acd3c0aa17f109d3ea41a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 24 Jun 2015 18:29:32 -0700 Subject: [PATCH 552/956] Move vars to the top and cleanup extra whitespace --- post-processor/compress/post-processor.go | 25 +++++++++++------------ 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 72b85090b..bb6ce27bf 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -19,6 +19,18 @@ import ( "github.com/pierrec/lz4" ) +var ( + // ErrInvalidCompressionLevel is returned when the compression level passed + // to gzip is not in the expected range. See compress/flate for details. + ErrInvalidCompressionLevel = fmt.Errorf( + "Invalid compression level. Expected an integer from -1 to 9.") + + ErrWrongInputCount = fmt.Errorf( + "Can only have 1 input file when not using tar/zip") + + filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`) +) + type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -38,18 +50,6 @@ type PostProcessor struct { config Config } -var ( - // ErrInvalidCompressionLevel is returned when the compression level passed - // to gzip is not in the expected range. See compress/flate for details. - ErrInvalidCompressionLevel = fmt.Errorf( - "Invalid compression level. 
Expected an integer from -1 to 9.") - - ErrWrongInputCount = fmt.Errorf( - "Can only have 1 input file when not using tar/zip") - - filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`) -) - func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, @@ -109,7 +109,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } return nil - } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { From c537623bc60c2526d01888b0b684add62bbe4041 Mon Sep 17 00:00:00 2001 From: Jani Jappinen Date: Thu, 25 Jun 2015 17:25:28 +0300 Subject: [PATCH 553/956] Fix failing AMI snapshot tagging when copying to other regions. --- builder/amazon/common/step_create_tags.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 7c89e5a59..4750d7a08 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -34,8 +34,13 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { // Declare list of resources to tag resourceIds := []*string{&ami} + regionconn := ec2.New(&aws.Config{ + Credentials: ec2conn.Config.Credentials, + Region: region, + }) + // Retrieve image list for given AMI - imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + imageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{ ImageIDs: resourceIds, }) @@ -63,11 +68,6 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { } } - regionconn := ec2.New(&aws.Config{ - Credentials: ec2conn.Config.Credentials, - Region: region, - }) - _, err = regionconn.CreateTags(&ec2.CreateTagsInput{ Resources: resourceIds, Tags: ec2Tags, From dd97a60c9b8d5e908a3e6bf2f0411bac5757b6cf Mon Sep 17 00:00:00 2001 From: Marcin Matlaszek Date: Thu, 25 Jun 2015 17:02:38 +0200 Subject: [PATCH 554/956] Fix for tag creation when creating new ec2 instance. 
--- builder/amazon/common/step_run_source_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index f88db5efc..2150616c2 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -256,7 +256,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1) ec2Tags[0] = &ec2.Tag{Key: aws.String("Name"), Value: aws.String("Packer Builder")} for k, v := range s.Tags { - ec2Tags = append(ec2Tags, &ec2.Tag{Key: &k, Value: &v}) + ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)}) } _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ From dd1fdc722a8239b3ab6da907219aaa2bb76d0c57 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 25 Jun 2015 10:18:56 -0500 Subject: [PATCH 555/956] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cb14f96d..aba79413d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,9 @@ BUG FIXES: * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] + * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] + * builder/amazon: Fix failing AMI snapshot tagging when copying to other + regions [GH-2316] ## 0.8.0 (June 23, 2015) From 8ae0cfc75932bc8dc3e410fcc04de9742d29884c Mon Sep 17 00:00:00 2001 From: Tommy Ulfsparre Date: Thu, 25 Jun 2015 20:07:45 +0200 Subject: [PATCH 556/956] copy and convert to pointer --- builder/amazon/common/block_device.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index a01dfc83d..482b876f6 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -29,27 +29,27 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { for _, blockDevice := range b { ebsBlockDevice := &ec2.EBSBlockDevice{ - VolumeType: &blockDevice.VolumeType, - VolumeSize: &blockDevice.VolumeSize, - DeleteOnTermination: &blockDevice.DeleteOnTermination, + VolumeType: aws.String(blockDevice.VolumeType), + VolumeSize: aws.Long(blockDevice.VolumeSize), + DeleteOnTermination: aws.Boolean(blockDevice.DeleteOnTermination), } // IOPS is only valid for SSD Volumes if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { - ebsBlockDevice.IOPS = &blockDevice.IOPS + ebsBlockDevice.IOPS = aws.Long(blockDevice.IOPS) } // You cannot specify Encrypted if you specify a Snapshot ID if blockDevice.SnapshotId != "" { - ebsBlockDevice.SnapshotID = &blockDevice.SnapshotId + ebsBlockDevice.SnapshotID = aws.String(blockDevice.SnapshotId) } else if blockDevice.Encrypted { - ebsBlockDevice.Encrypted = &blockDevice.Encrypted + ebsBlockDevice.Encrypted = aws.Boolean(blockDevice.Encrypted) } mapping := &ec2.BlockDeviceMapping{ EBS: ebsBlockDevice, - DeviceName: &blockDevice.DeviceName, - VirtualName: &blockDevice.VirtualName, + DeviceName: aws.String(blockDevice.DeviceName), + VirtualName: aws.String(blockDevice.VirtualName), } if blockDevice.NoDevice { From fe0fde195dc6a862e2d8ed95b0e9d96440d82838 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 25 Jun 2015 14:51:01 -0500 Subject: [PATCH 557/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index aba79413d..0084705b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ BUG FIXES: * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] + * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] * builder/amazon: Fix failing AMI snapshot tagging when copying to other regions [GH-2316] From 5896d96bdb2e590a5a6f6bca651f56f8a433df32 Mon Sep 17 00:00:00 2001 From: Peter Doherty Date: Thu, 25 Jun 2015 16:03:24 -0400 Subject: [PATCH 558/956] Update docs for default configuration template Update docs for default configuration template for the chef-client provisioner, to reflect the ability to set a custom validation_client_name --- website/source/docs/provisioners/chef-client.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 9a2a11379..3e56eecb2 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -113,11 +113,17 @@ The default value for the configuration template is: log_level :info log_location STDOUT chef_server_url "{{.ServerUrl}}" +{{if ne .ValidationClientName ""}} +validation_client_name "{{.ValidationClientName}}" +{{else}} validation_client_name "chef-validator" +{{end}} {{if ne .ValidationKeyPath ""}} validation_key "{{.ValidationKeyPath}}" {{end}} +{{if ne .NodeName ""}} node_name "{{.NodeName}}" +{{end}} ``` This template is a [configuration template](/docs/templates/configuration-templates.html) From 88fac0b49c06dcac0ecaa00ecbd6109f2554a42f Mon Sep 17 00:00:00 2001 From: Israel Shirk Date: Thu, 25 Jun 2015 19:03:00 -0600 Subject: [PATCH 559/956] Have nonesum run stat rather than nonesum because nonesum sums none. k. --- builder/vmware/iso/driver_esx5.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 8162db468..e8534ceeb 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -396,10 +396,17 @@ func (d *ESX5Driver) upload(dst, src string) error { } func (d *ESX5Driver) verifyChecksum(ctype string, hash string, file string) bool { - stdin := bytes.NewBufferString(fmt.Sprintf("%s %s", hash, file)) - _, err := d.run(stdin, fmt.Sprintf("%ssum", ctype), "-c") - if err != nil { - return false + if (ctype == "none") { + err := d.sh("stat", file) + if err != nil { + return false + } + } else { + stdin := bytes.NewBufferString(fmt.Sprintf("%s %s", hash, file)) + _, err := d.run(stdin, fmt.Sprintf("%ssum", ctype), "-c") + if err != nil { + return false + } } return true } From 586cab11ee1440e85bc09cef381caafe241a94d4 Mon Sep 17 00:00:00 2001 From: Marcin Matlaszek Date: Fri, 26 Jun 2015 15:29:46 +0200 Subject: [PATCH 560/956] Fixes spot instance cleanup. 
--- builder/amazon/common/step_run_source_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 2150616c2..09b053901 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -296,7 +296,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { if s.spotRequest != nil { ui.Say("Cancelling the spot request...") input := &ec2.CancelSpotInstanceRequestsInput{ - SpotInstanceRequestIDs: []*string{s.spotRequest.InstanceID}, + SpotInstanceRequestIDs: []*string{s.spotRequest.SpotInstanceRequestID}, } if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil { ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err)) From 4d003aa5a30b15068440c766c5d6daff3eeae78d Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Fri, 26 Jun 2015 10:43:13 -0500 Subject: [PATCH 561/956] builder/amazon-instance: Don't specify empty Virtualization Type --- builder/amazon/instance/step_register_ami.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/builder/amazon/instance/step_register_ami.go b/builder/amazon/instance/step_register_ami.go index c3150449d..f97c5df0e 100644 --- a/builder/amazon/instance/step_register_ami.go +++ b/builder/amazon/instance/step_register_ami.go @@ -3,6 +3,7 @@ package instance import ( "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -20,15 +21,17 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { ui.Say("Registering the AMI...") registerOpts := &ec2.RegisterImageInput{ ImageLocation: &manifestPath, - Name: &config.AMIName, + Name: aws.String(config.AMIName), BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(), - VirtualizationType: &config.AMIVirtType, + } + + if config.AMIVirtType != "" { + registerOpts.VirtualizationType = aws.String(config.AMIVirtType) } // Set SriovNetSupport to "simple". 
See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { - simple := "simple" - registerOpts.SRIOVNetSupport = &simple + registerOpts.SRIOVNetSupport = aws.String("simple") } registerResp, err := ec2conn.RegisterImage(registerOpts) From e112d9b2880085e4b66aec8ce7cf2fc581927336 Mon Sep 17 00:00:00 2001 From: Clint Date: Fri, 26 Jun 2015 11:11:46 -0500 Subject: [PATCH 562/956] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0084705b4..d71df0047 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ BUG FIXES: * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] * builder/amazon: Fix failing AMI snapshot tagging when copying to other regions [GH-2316] + * builder/amazon-instance: Fix issue with creating AMIs without specifying a + virtualization type [GH-2330] ## 0.8.0 (June 23, 2015) From daf563e210ef7d0cfe0dcefa1cf13b6baab0de98 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 26 Jun 2015 10:22:15 -0700 Subject: [PATCH 563/956] Change aws.ami to amazon.ami to be consistent with usage in terraform --- website/source/docs/post-processors/atlas.html.markdown | 4 ++-- .../source/intro/getting-started/remote-builds.html.markdown | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index dc8f7d042..4e0da3846 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -37,7 +37,7 @@ The configuration allows you to specify and access the artifact in Atlas. have access to the organization, hashicorp in this example, in order to add an artifact to the organization in Atlas. -* `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `aws.ami`. +* `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. -> **Note:** If you want to upload Vagrant boxes to Atlas, for now use the [Vagrant Cloud post-processor](/docs/post-processors/vagrant-cloud.html). @@ -85,7 +85,7 @@ to `https://atlas.hashicorp.com/api/v1`. "type": "atlas", "token": "{{user `atlas_token`}}", "artifact": "hashicorp/foobar", - "artifact_type": "aws.ami", + "artifact_type": "amazon.ami", "metadata": { "created_at": "{{timestamp}}" } diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index d63bf560b..e5d1b48ff 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -66,7 +66,7 @@ Now we have Atlas building an AMI with Redis pre-configured. 
This is great, but { "type": "atlas", "artifact": "ATLAS_USERNAME/packer-tutorial", - "artifact_type": "aws.ami" + "artifact_type": "amazon.ami" } ] } From d8b6c94012460437f70230ed9c70cbe2685189fe Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 26 Jun 2015 10:52:21 -0700 Subject: [PATCH 564/956] Fix typo: diabled -> disabled --- communicator/ssh/communicator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index d6f00351d..ffcdac749 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -291,7 +291,7 @@ func (c *comm) connectToAgent() { } if c.config.DisableAgent { - log.Printf("[INFO] SSH agent forwarding is diabled.") + log.Printf("[INFO] SSH agent forwarding is disabled.") return } From 016d916d83df0e2f92f91197c951e0b7961b5e43 Mon Sep 17 00:00:00 2001 From: Tom Cahill Date: Thu, 25 Jun 2015 17:48:38 -0700 Subject: [PATCH 565/956] Fix adding groups to AMI launch permissions. Previously, groups were granted launch permissions by submitting a ModifyImageAttribute request with the UserGroups parameter set appropriately. This is no longer valid, as the LaunchPermission parameter must be set. --- .../common/step_modify_ami_attributes.go | 15 ++++++++--- builder/amazon/ebs/builder_acc_test.go | 26 ++++++++++++++----- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index a0c5dccfb..33bf6bf3a 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -44,12 +44,21 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc if len(s.Groups) > 0 { groups := make([]*string, len(s.Groups)) + adds := make([]*ec2.LaunchPermission, len(s.Groups)) + addGroups := &ec2.ModifyImageAttributeInput{ + LaunchPermission: &ec2.LaunchPermissionModifications{}, + } + for i, g := range s.Groups { groups[i] = &g + adds[i] = &ec2.LaunchPermission{ + Group: &g, + } } - options["groups"] = &ec2.ModifyImageAttributeInput{ - UserGroups: groups, - } + addGroups.UserGroups = groups + addGroups.LaunchPermission.Add = adds + + options["groups"] = addGroups } if len(s.Users) > 0 { diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index 844b70ccc..879f7a732 100644 --- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -50,11 +50,11 @@ func TestBuilderAcc_amiSharing(t *testing.T) { PreCheck: func() { testAccPreCheck(t) }, Builder: &Builder{}, Template: testBuilderAccSharing, - Check: checkAMISharing(1, "932021504756"), + Check: checkAMISharing(2, "932021504756", "all"), }) } -func checkAMISharing(count int, uid string) builderT.TestCheckFunc { +func checkAMISharing(count int, uid, group string) builderT.TestCheckFunc { return func(artifacts []packer.Artifact) error { if len(artifacts) > 1 { return fmt.Errorf("more than 1 artifact") @@ -84,17 +84,28 @@ func checkAMISharing(count int, uid string) builderT.TestCheckFunc { return fmt.Errorf("Error in Image Attributes, expected (%d) Launch Permissions, got (%d)", count, len(imageResp.LaunchPermissions)) } - found := false + userFound := false for _, lp := range imageResp.LaunchPermissions { - if uid == *lp.UserID { - found = true + if lp.UserID != nil && uid == *lp.UserID { + userFound = true } } - if !found { + if !userFound { return fmt.Errorf("Error in Image 
Attributes, expected User ID (%s) to have Launch Permissions, but was not found", uid) } + groupFound := false + for _, lp := range imageResp.LaunchPermissions { + if lp.Group != nil && group == *lp.Group { + groupFound = true + } + } + + if !groupFound { + return fmt.Errorf("Error in Image Attributes, expected Group ID (%s) to have Launch Permissions, but was not found", group) + } + return nil } } @@ -203,7 +214,8 @@ const testBuilderAccSharing = ` "source_ami": "ami-76b2a71e", "ssh_username": "ubuntu", "ami_name": "packer-test {{timestamp}}", - "ami_users":["932021504756"] + "ami_users":["932021504756"], + "ami_groups":["all"] }] } ` From 6644d769824bdd2ce338760ff5e84dbde7821967 Mon Sep 17 00:00:00 2001 From: Tom Cahill Date: Fri, 26 Jun 2015 12:08:44 -0700 Subject: [PATCH 566/956] Use aws.String to obtain string pointers --- builder/amazon/common/step_modify_ami_attributes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index 33bf6bf3a..712eea615 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -50,9 +50,9 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc } for i, g := range s.Groups { - groups[i] = &g + groups[i] = aws.String(g) adds[i] = &ec2.LaunchPermission{ - Group: &g, + Group: aws.String(g), } } addGroups.UserGroups = groups From 355364ce4ce560648350db970ab3382feaab2a6c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 26 Jun 2015 14:59:01 -0700 Subject: [PATCH 567/956] Change some outdated references to Vagrant Cloud to point to Atlas instead --- website/source/docs/post-processors/atlas.html.markdown | 2 +- website/source/docs/post-processors/vagrant-cloud.html.markdown | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 4e0da3846..91b78e766 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -40,7 +40,7 @@ The configuration allows you to specify and access the artifact in Atlas. * `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. --> **Note:** If you want to upload Vagrant boxes to Atlas, for now use the [Vagrant Cloud post-processor](/docs/post-processors/vagrant-cloud.html). +-> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas post-processor](/docs/post-processors/atlas.html). ### Optional: diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 374e8c73a..451ed087b 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -7,6 +7,8 @@ description: |- # Vagrant Cloud Post-Processor +~> Vagrant Cloud has been superseded by Atlas. Please use the [Atlas post-processor](/docs/post-processors/atlas.html) instead. Learn more about [Atlas](https://atlas.hashicorp.com/).
+ Type: `vagrant-cloud` The Packer Vagrant Cloud post-processor receives a Vagrant box from the `vagrant` From aa84f9eeb55d65fbe5229562f8b56efe17c24d90 Mon Sep 17 00:00:00 2001 From: Ben Wen Date: Fri, 26 Jun 2015 15:18:45 -0700 Subject: [PATCH 568/956] Tips for debugging a build. --- .../source/docs/other/debugging.html.markdown | 41 +++++++++++++++---- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index 714a87264..eabf56533 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -5,18 +5,41 @@ description: |- Packer strives to be stable and bug-free, but issues inevitably arise where certain things may not work entirely correctly, or may not appear to work correctly. In these cases, it is sometimes helpful to see more details about what Packer is actually doing. --- -# Debugging Packer +# Debugging Packer Builds -Packer strives to be stable and bug-free, but issues inevitably arise where -certain things may not work entirely correctly, or may not appear to work -correctly. In these cases, it is sometimes helpful to see more details about -what Packer is actually doing. +For remote builds with cloud providers like Amazon Web Services AMIs, debugging +a Packer build can be eased greatly with `packer build -debug`. This disables +parallelization and enables debug mode. + +Debug mode informs the builders that they should output debugging information. +The exact behavior of debug mode is left to the builder. In general, builders +usually will stop between each step, waiting for keyboard input before +continuing. This will allow you to inspect state and so on. + +In debug mode once the remote instance is instantiated, Packer will emit to the +current directory an ephemeral private ssh key as a .pem file. Using that you +can `ssh -i ` into the remote build instance and see what is going on +for debugging. The ephemeral key will be deleted at the end of the Packer run +during cleanup. + +### Windows +As of Packer 0.8.1 the default WinRM communicator will emit the password for a +Remote Desktop Connection into your instance. This happens following the several +minute pause as the instance is booted. Note a .pem key is still created for +securely transmitting the password. Packer automatically decrypts the password +for you in debug mode. + +## Debugging Packer + +Issues occasionally arise where certain things may not work entirely correctly, +or may not appear to work correctly. In these cases, it is sometimes helpful to +see more details about what Packer is actually doing. Packer has detailed logs which can be enabled by setting the `PACKER_LOG` -environmental variable to any value. This will cause detailed logs to appear -on stderr. The logs contain log messages from Packer as well as any plugins -that are being used. Log messages from plugins are prefixed by their application -name. +environmental variable to any value like this `PACKER_LOG=1 packer build +`. This will cause detailed logs to appear on stderr. The logs +contain log messages from Packer as well as any plugins that are being used. Log +messages from plugins are prefixed by their application name. Note that because Packer is highly parallelized, log messages sometimes appear out of order, especially with respect to plugins.
In this case, From eee066371a5bd8a4b281a264bbf8ad093d101d6c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 26 Jun 2015 17:54:59 -0700 Subject: [PATCH 569/956] Support -flag=var1,var2,var3 to fix #2332 --- helper/flag-slice/flag.go | 2 +- helper/flag-slice/flag_test.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go index da75149dc..587b674fa 100644 --- a/helper/flag-slice/flag.go +++ b/helper/flag-slice/flag.go @@ -11,6 +11,6 @@ func (s *StringFlag) String() string { } func (s *StringFlag) Set(value string) error { - *s = append(*s, value) + *s = append(*s, strings.Split(value, ",")...) return nil } diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go index f72e1d960..61d8682b2 100644 --- a/helper/flag-slice/flag_test.go +++ b/helper/flag-slice/flag_test.go @@ -14,6 +14,8 @@ func TestStringFlag_implements(t *testing.T) { } } +// TestStringFlagSet tests for setting the same flag more than once on the CLI +// like: blah -flag foo -flag bar func TestStringFlagSet(t *testing.T) { sv := new(StringFlag) err := sv.Set("foo") @@ -31,3 +33,18 @@ func TestStringFlagSet(t *testing.T) { t.Fatalf("Bad: %#v", sv) } } + +// TestMultiStringFlag tests for setting the same flag using a comma-separated +// list of items like: blah -flag=foo,bar +func TestMultiStringFlag(t *testing.T) { + sv := new(StringFlag) + err := sv.Set("chocolate,vanilla") + if err != nil { + t.Fatalf("err :%s", err) + } + + expected := []string{"chocolate", "vanilla"} + if !reflect.DeepEqual([]string(*sv), expected) { + t.Fatalf("Expected: %#v, found: %#v", expected, sv) + } +} From 94e1f830701907a32fc115a635643ec17e8a33a2 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 27 Jun 2015 00:47:50 -0700 Subject: [PATCH 570/956] Added a black-box acceptance test for -only and -except build flags --- command/build_test.go | 132 ++++++++++++++++++ .../test-fixtures/build-only/template.json | 22 +++ 2 files changed, 154 insertions(+) create mode 100644 command/build_test.go create mode 100644 command/test-fixtures/build-only/template.json diff --git a/command/build_test.go b/command/build_test.go new file mode 100644 index 000000000..73837e1a2 --- /dev/null +++ b/command/build_test.go @@ -0,0 +1,132 @@ +package command + +import ( + "bytes" + "os" + "path/filepath" + "testing" + + "github.com/mitchellh/packer/builder/file" + "github.com/mitchellh/packer/packer" +) + +func TestBuildOnlyFileCommaFlags(t *testing.T) { + c := &BuildCommand{ + Meta: testMetaFile(t), + } + + args := []string{ + "-only=chocolate,vanilla", + filepath.Join(testFixture("build-only"), "template.json"), + } + + defer cleanup() + + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + if !fileExists("chocolate.txt") { + t.Error("Expected to find chocolate.txt") + } + if !fileExists("vanilla.txt") { + t.Error("Expected to find vanilla.txt") + } + if fileExists("cherry.txt") { + t.Error("Expected NOT to find cherry.txt") + } +} + +func TestBuildOnlyFileMultipleFlags(t *testing.T) { + c := &BuildCommand{ + Meta: testMetaFile(t), + } + + args := []string{ + "-only=chocolate", + "-only=cherry", + filepath.Join(testFixture("build-only"), "template.json"), + } + + defer cleanup() + + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + if !fileExists("chocolate.txt") { + t.Error("Expected to find chocolate.txt") + } + if fileExists("vanilla.txt") { + t.Error("Expected NOT to find vanilla.txt") + } + if 
!fileExists("cherry.txt") { + t.Error("Expected to find cherry.txt") + } +} + +func TestBuildExceptFileCommaFlags(t *testing.T) { + c := &BuildCommand{ + Meta: testMetaFile(t), + } + + args := []string{ + "-except=chocolate", + filepath.Join(testFixture("build-only"), "template.json"), + } + + defer cleanup() + + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + if fileExists("chocolate.txt") { + t.Error("Expected NOT to find chocolate.txt") + } + if !fileExists("vanilla.txt") { + t.Error("Expected to find vanilla.txt") + } + if !fileExists("cherry.txt") { + t.Error("Expected to find cherry.txt") + } +} + +// fileExists returns true if the filename is found +func fileExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} + +// testCoreConfigBuilder creates a packer CoreConfig that has a file builder +// available. This allows us to test a builder that writes files to disk. +func testCoreConfigBuilder(t *testing.T) *packer.CoreConfig { + components := packer.ComponentFinder{ + Builder: func(n string) (packer.Builder, error) { + return &file.Builder{}, nil + }, + } + return &packer.CoreConfig{ + Components: components, + } +} + +// testMetaFile creates a Meta object that includes a file builder +func testMetaFile(t *testing.T) Meta { + var out, err bytes.Buffer + return Meta{ + CoreConfig: testCoreConfigBuilder(t), + Ui: &packer.BasicUi{ + Writer: &out, + ErrorWriter: &err, + }, + } +} + +func cleanup() { + os.RemoveAll("chocolate.txt") + os.RemoveAll("vanilla.txt") + os.RemoveAll("cherry.txt") +} diff --git a/command/test-fixtures/build-only/template.json b/command/test-fixtures/build-only/template.json new file mode 100644 index 000000000..ee89d635e --- /dev/null +++ b/command/test-fixtures/build-only/template.json @@ -0,0 +1,22 @@ +{ + "builders": [ + { + "name":"chocolate", + "type":"file", + "content":"chocolate", + "target":"chocolate.txt" + }, + { + "name":"vanilla", + "type":"file", + "content":"vanilla", + "target":"vanilla.txt" + }, + { + "name":"cherry", + "type":"file", + "content":"cherry", + "target":"cherry.txt" + } + ] +} From 5c64f7f7cf9d039775cff4300578af4420583eda Mon Sep 17 00:00:00 2001 From: "nick.grange" Date: Mon, 29 Jun 2015 23:10:56 +1000 Subject: [PATCH 571/956] #2346 updated to ami that doesn't require accepting marketplace terms Updated to same ami as in main readme.md which is from community AMIs and therefore doesn't require accepting the AWS marketplace terms before using in Packer build. --- website/source/intro/getting-started/build-image.html.markdown | 2 +- .../source/intro/getting-started/parallel-builds.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/intro/getting-started/build-image.html.markdown b/website/source/intro/getting-started/build-image.html.markdown index 0c0fc0184..4bf8eda57 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -54,7 +54,7 @@ briefly. 
Create a file `example.json` and fill it with the following contents: "access_key": "{{user `aws_access_key`}}", "secret_key": "{{user `aws_secret_key`}}", "region": "us-east-1", - "source_ami": "ami-c65be9ae", + "source_ami": "ami-de0d9eb7", "instance_type": "t1.micro", "ssh_username": "ubuntu", "ami_name": "packer-example {{timestamp}}" diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 700f03c28..90554dacc 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -93,7 +93,7 @@ The entire template should now look like this: "access_key": "{{user `aws_access_key`}}", "secret_key": "{{user `aws_secret_key`}}", "region": "us-east-1", - "source_ami": "ami-c65be9ae", + "source_ami": "ami-de0d9eb7", "instance_type": "t1.micro", "ssh_username": "ubuntu", "ami_name": "packer-example {{timestamp}}" From 3eaf7f38b0ccf35b77c71247f65e0e629859be52 Mon Sep 17 00:00:00 2001 From: Marcin Matlaszek Date: Mon, 29 Jun 2015 15:39:27 +0200 Subject: [PATCH 572/956] Fix adding launch permissions for created ami. --- builder/amazon/common/step_modify_ami_attributes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index a0c5dccfb..cf37fc5ac 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -56,8 +56,8 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc users := make([]*string, len(s.Users)) adds := make([]*ec2.LaunchPermission, len(s.Users)) for i, u := range s.Users { - users[i] = &u - adds[i] = &ec2.LaunchPermission{UserID: &u} + users[i] = aws.String(&u) + adds[i] = &ec2.LaunchPermission{UserID: aws.String(&u)} } options["users"] = &ec2.ModifyImageAttributeInput{ UserIDs: users, From a3fa7cdce3082f392cfb8afa3e529d71b02b8da2 Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Mon, 29 Jun 2015 18:14:41 +0200 Subject: [PATCH 573/956] Add AppVeyor configuration file Packer lacks an official Windows CI server. This commit adds a configuration file for AppVeyor, which is a Windows CI service (free for open-source projects). --- appveyor.yml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 appveyor.yml diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000..859f053c5 --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,33 @@ +# appveyor.yml reference : http://www.appveyor.com/docs/appveyor-yml + +version: "{build}" + +skip_tags: true + +os: Windows Server 2012 R2 + +environment: + GOPATH: c:\gopath + matrix: + - GOARCH: 386 + GOVERSION: 1.4.2 + - GOARCH: amd64 + GOVERSION: 1.4.2 + +clone_folder: c:\gopath\src\github.com\mitchellh\packer + +install: + - set Path=c:\go\bin;%Path% + - echo %Path% + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-%GOARCH%.msi + - msiexec /i go%GOVERSION%.windows-%GOARCH%.msi /q + - go version + - go env + - go get -d -v -t ./... + +build_script: + - go test -v ./... 
+ +test: off + +deploy: off From aab5ca7059525b8ce84fb5c6921583ed02a29545 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:22:33 -0700 Subject: [PATCH 574/956] update CHANGELOG --- CHANGELOG.md | 5 +++-- builder/amazon/common/step_modify_ami_attributes.go | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d71df0047..e13b129e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,10 @@ BUG FIXES: * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] - * builder/amazon: Fix failing AMI snapshot tagging when copying to other + * builder/amazon: Fix failing AMI snapshot tagging when copying to other regions [GH-2316] - * builder/amazon-instance: Fix issue with creating AMIs without specifying a + * builder/amazon: Fix setting AMI launch permissions [GH-2348] + * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] ## 0.8.0 (June 23, 2015) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index cf37fc5ac..2d72caba7 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -56,8 +56,8 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc users := make([]*string, len(s.Users)) adds := make([]*ec2.LaunchPermission, len(s.Users)) for i, u := range s.Users { - users[i] = aws.String(&u) - adds[i] = &ec2.LaunchPermission{UserID: aws.String(&u)} + users[i] = aws.String(u) + adds[i] = &ec2.LaunchPermission{UserID: aws.String(u)} } options["users"] = &ec2.ModifyImageAttributeInput{ UserIDs: users, From 79de517e046f8de37d07cd355a6b4551707c728d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:24:22 -0700 Subject: [PATCH 575/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e13b129e0..110ee2dfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ BUG FIXES: * builder/amazon: Fix failing AMI snapshot tagging when copying to other regions [GH-2316] * builder/amazon: Fix setting AMI launch permissions [GH-2348] + * builder/amazon: Fix spot instance cleanup to remove the correct request [GH-2327] * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] From 4a60e469e93fad44d73ab78b9a12f33be1130c9e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:33:20 -0700 Subject: [PATCH 576/956] update CHANGELOG --- CHANGELOG.md | 2 ++ builder/vmware/iso/driver_esx5.go | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 110ee2dfc..8c7d7da46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ BUG FIXES: * builder/amazon: Fix spot instance cleanup to remove the correct request [GH-2327] * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] + * builder/vmware-iso: Setting `checksum_type` to `none` for ESX builds + now works [GH-2323] ## 0.8.0 (June 23, 2015) diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index e8534ceeb..75d4d3d25 100644 --- a/builder/vmware/iso/driver_esx5.go +++ 
b/builder/vmware/iso/driver_esx5.go @@ -396,9 +396,8 @@ func (d *ESX5Driver) upload(dst, src string) error { } func (d *ESX5Driver) verifyChecksum(ctype string, hash string, file string) bool { - if (ctype == "none") { - err := d.sh("stat", file) - if err != nil { + if ctype == "none" { + if err := d.sh("stat", file); err != nil { return false } } else { @@ -408,6 +407,7 @@ func (d *ESX5Driver) verifyChecksum(ctype string, hash string, file string) bool return false } } + return true } From 1aef60ff7675edd62089507a7f41468920a37434 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:35:42 -0700 Subject: [PATCH 577/956] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c7d7da46..3d3434fa9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,8 @@ BUG FIXES: virtualization type [GH-2330] * builder/vmware-iso: Setting `checksum_type` to `none` for ESX builds now works [GH-2323] + * provisioner/chef: Use knife config file vs command-line params to + clean up nodes so full set of features can be used [GH-2306] ## 0.8.0 (June 23, 2015) From 0a2e54feaf0714541fba475ee9dd79807f475149 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:40:15 -0700 Subject: [PATCH 578/956] builder/amazon: output WinRM password for debug mode [GH-2336] --- builder/amazon/common/step_get_password.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index ab51f4394..08a9c7b66 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -19,6 +19,7 @@ import ( // StepGetPassword reads the password from a Windows server and sets it // on the WinRM config. 
type StepGetPassword struct { + Debug bool Comm *communicator.Config Timeout time.Duration } @@ -85,6 +86,13 @@ WaitLoop: } } } + + // In debug-mode, we output the password + if s.Debug { + ui.Message(fmt.Sprintf( + "Password (since debug is enabled): %s", s.Comm.WinRMPassword)) + } + return multistep.ActionContinue } From b315e470c82341b48e262688ce02d59069c23eed Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:40:58 -0700 Subject: [PATCH 579/956] builder/amazon: set debug flag for get password --- builder/amazon/ebs/builder.go | 1 + builder/amazon/instance/builder.go | 1 + 2 files changed, 2 insertions(+) diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index a572bfeba..1eab06247 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -119,6 +119,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Tags: b.config.RunTags, }, &awscommon.StepGetPassword{ + Debug: b.config.PackerDebug, Comm: &b.config.RunConfig.Comm, Timeout: b.config.WindowsPasswordTimeout, }, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 17760d918..58efc6032 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -204,6 +204,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Tags: b.config.RunTags, }, &awscommon.StepGetPassword{ + Debug: b.config.PackerDebug, Comm: &b.config.RunConfig.Comm, Timeout: b.config.WindowsPasswordTimeout, }, From 98db68e36cceb1f521d476b7ca7aaa280b66d9d1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:49:11 -0700 Subject: [PATCH 580/956] builder/amazon/instance: set valid bundle prefix [GH-2328] --- builder/amazon/instance/builder.go | 12 +++++++----- builder/amazon/instance/builder_test.go | 1 - 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 17760d918..6375c9e0d 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -50,6 +50,12 @@ type Builder struct { } func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + configs := make([]interface{}, len(raws)+1) + configs[0] = map[string]interface{}{ + "bundle_prefix": "image-{{timestamp}}", + } + copy(configs[1:], raws) + b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, @@ -60,7 +66,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { "bundle_vol_command", }, }, - }, raws...) + }, configs...) 
if err != nil { return nil, err } @@ -69,10 +75,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.BundleDestination = "/tmp" } - if b.config.BundlePrefix == "" { - b.config.BundlePrefix = "image-{{timestamp}}" - } - if b.config.BundleUploadCommand == "" { if b.config.IamInstanceProfile != "" { b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " + diff --git a/builder/amazon/instance/builder_test.go b/builder/amazon/instance/builder_test.go index e15c45131..bb18e54a8 100644 --- a/builder/amazon/instance/builder_test.go +++ b/builder/amazon/instance/builder_test.go @@ -130,7 +130,6 @@ func TestBuilderPrepare_BundlePrefix(t *testing.T) { b := &Builder{} config := testConfig() - config["bundle_prefix"] = "" warnings, err := b.Prepare(config) if len(warnings) > 0 { t.Fatalf("bad: %#v", warnings) From 6c7a7b6068d55e3d5e4f019576cc267a6ab89183 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 09:56:33 -0700 Subject: [PATCH 581/956] builder/googlecompute: default SSH settings properly [GH-2340] --- builder/googlecompute/config.go | 3 +++ builder/googlecompute/config_test.go | 29 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 4603f2769..eebea011f 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -97,6 +97,9 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } var errs *packer.MultiError + if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { + errs = packer.MultiErrorAppend(errs, es...) + } // Process required parameters. if c.ProjectId == "" { diff --git a/builder/googlecompute/config_test.go b/builder/googlecompute/config_test.go index c28c35a0f..93997912e 100644 --- a/builder/googlecompute/config_test.go +++ b/builder/googlecompute/config_test.go @@ -152,6 +152,35 @@ func TestConfigPrepare(t *testing.T) { } } +func TestConfigDefaults(t *testing.T) { + cases := []struct { + Read func(c *Config) interface{} + Value interface{} + }{ + { + func(c *Config) interface{} { return c.Comm.Type }, + "ssh", + }, + + { + func(c *Config) interface{} { return c.Comm.SSHPort }, + 22, + }, + } + + for _, tc := range cases { + raw := testConfig(t) + + c, warns, errs := NewConfig(raw) + testConfigOk(t, warns, errs) + + actual := tc.Read(c) + if actual != tc.Value { + t.Fatalf("bad: %#v", actual) + } + } +} + func testAccountFile(t *testing.T) string { tf, err := ioutil.TempFile("", "packer") if err != nil { From eef260a11bc737904c080e14d1b7e2933b56967f Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Mon, 29 Jun 2015 19:23:44 +0200 Subject: [PATCH 582/956] Add AppVeyor status badge to README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index de9328795..fd562eb9a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Packer [![Build Status](https://travis-ci.org/mitchellh/packer.svg?branch=master)](https://travis-ci.org/mitchellh/packer) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/mitchellh/packer?branch=master&svg=true)](https://ci.appveyor.com/project/hashicorp/packer) * Website: http://www.packer.io * IRC: `#packer-tool` on Freenode From bfcd0044974cc1cc18bad445e15be31d3629d728 Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Mon, 29 Jun 2015 20:04:07 +0200 Subject: [PATCH 583/956] AppVeyor: only build master branch AppVeyor builds all branches per default. Add a configuration setting to only build the master branch. 
See http://www.appveyor.com/docs/branches#white-and-blacklisting for details. For the record, AppVeyor builds can be skipped by annotating the commit message according to http://www.appveyor.com/docs/how-to/skip-build. Useful for e.g. website commits. [skip appveyor] --- appveyor.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/appveyor.yml b/appveyor.yml index 859f053c5..202456f58 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -4,6 +4,10 @@ version: "{build}" skip_tags: true +branches: + only: + - master + os: Windows Server 2012 R2 environment: From 8657b1e9d81f147747122870032c81acb3bd663c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 11:38:05 -0700 Subject: [PATCH 584/956] communicator/ssh: more logging --- communicator/ssh/communicator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index ffcdac749..cc61e8e9f 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -277,6 +277,7 @@ func (c *comm) reconnect() (err error) { if err != nil { log.Printf("handshake error: %s", err) } + log.Printf("handshake complete!") if sshConn != nil { c.client = ssh.NewClient(sshConn, sshChan, req) } From 2498ad02c811413922a55298191f30a00c51fe76 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 11:49:45 -0700 Subject: [PATCH 585/956] packer: validate minimum version [GH-2310] --- command/meta.go | 2 ++ main.go | 1 + packer/core.go | 27 +++++++++++++++++++ packer/core_test.go | 14 ++++++++++ .../validate-min-version-high.json | 7 +++++ .../test-fixtures/validate-min-version.json | 7 +++++ 6 files changed, 58 insertions(+) create mode 100644 packer/test-fixtures/validate-min-version-high.json create mode 100644 packer/test-fixtures/validate-min-version.json diff --git a/command/meta.go b/command/meta.go index e55aebf42..1c0864f92 100644 --- a/command/meta.go +++ b/command/meta.go @@ -28,6 +28,7 @@ type Meta struct { CoreConfig *packer.CoreConfig Cache packer.Cache Ui packer.Ui + Version string // These are set by command-line flags flagBuildExcept []string @@ -42,6 +43,7 @@ func (m *Meta) Core(tpl *template.Template) (*packer.Core, error) { config := *m.CoreConfig config.Template = tpl config.Variables = m.flagVars + config.Version = m.Version // Init the core core, err := packer.NewCore(&config) diff --git a/main.go b/main.go index 7f5cb7bef..4d23339d1 100644 --- a/main.go +++ b/main.go @@ -168,6 +168,7 @@ func wrappedMain() int { PostProcessor: config.LoadPostProcessor, Provisioner: config.LoadProvisioner, }, + Version: Version, }, Cache: cache, Ui: ui, diff --git a/packer/core.go b/packer/core.go index 3bc5d295e..f9bf87b9d 100644 --- a/packer/core.go +++ b/packer/core.go @@ -5,6 +5,7 @@ import ( "sort" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-version" "github.com/mitchellh/packer/template" "github.com/mitchellh/packer/template/interpolate" ) @@ -17,6 +18,7 @@ type Core struct { components ComponentFinder variables map[string]string builds map[string]*template.Builder + version string } // CoreConfig is the structure for initializing a new Core. Once a CoreConfig @@ -25,6 +27,7 @@ type CoreConfig struct { Components ComponentFinder Template *template.Template Variables map[string]string + Version string } // The function type used to lookup Builder implementations.
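The `validate` changes in the hunks below lean on `go-version` for ordered semantic comparison rather than string comparison. A standalone sketch of the exact check being added — not part of the patch, using the same versions the new tests and fixtures use:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// The running Packer version and a template's min_packer_version.
	actual, _ := version.NewVersion("1.0.0")
	minimum, _ := version.NewVersion("2.1.0")

	// LessThan compares version segments numerically, so "2.1.0" is
	// newer than "1.0.0". A plain string compare would agree here but
	// not for, say, "0.10.0" vs "0.9.0".
	fmt.Println(actual.LessThan(minimum)) // true — the template is rejected
}
```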
@@ -55,6 +58,7 @@ func NewCore(c *CoreConfig) (*Core, error) { Template: c.Template, components: c.Components, variables: c.Variables, + version: c.Version, } if err := result.validate(); err != nil { return nil, err @@ -226,6 +230,29 @@ func (c *Core) validate() error { return err } + // Validate the minimum version is satisfied + if c.Template.MinVersion != "" { + versionActual, err := version.NewVersion(c.version) + if err != nil { + // This shouldn't happen since we set it via the compiler + panic(err) + } + + versionMin, err := version.NewVersion(c.Template.MinVersion) + if err != nil { + return fmt.Errorf( + "min_version is invalid: %s", err) + } + + if versionActual.LessThan(versionMin) { + return fmt.Errorf( + "This template requires a minimum Packer version of %s,\n"+ + "but version %s is running.", + versionMin, + versionActual) + } + } + // Validate variables are set var err error for n, v := range c.Template.Variables { diff --git a/packer/core_test.go b/packer/core_test.go index cc958356e..07acea43c 100644 --- a/packer/core_test.go +++ b/packer/core_test.go @@ -484,6 +484,19 @@ func TestCoreValidate(t *testing.T) { map[string]string{"foo": "bar"}, false, }, + + // Min version good + { + "validate-min-version.json", + map[string]string{"foo": "bar"}, + false, + }, + + { + "validate-min-version-high.json", + map[string]string{"foo": "bar"}, + true, + }, } for _, tc := range cases { @@ -501,6 +514,7 @@ func TestCoreValidate(t *testing.T) { _, err = NewCore(&CoreConfig{ Template: tpl, Variables: tc.Vars, + Version: "1.0.0", }) if (err != nil) != tc.Err { t.Fatalf("err: %s\n\n%s", tc.File, err) diff --git a/packer/test-fixtures/validate-min-version-high.json b/packer/test-fixtures/validate-min-version-high.json new file mode 100644 index 000000000..2dd93a825 --- /dev/null +++ b/packer/test-fixtures/validate-min-version-high.json @@ -0,0 +1,7 @@ +{ + "min_packer_version": "2.1.0", + + "builders": [ + {"type": "foo"} + ] +} diff --git a/packer/test-fixtures/validate-min-version.json b/packer/test-fixtures/validate-min-version.json new file mode 100644 index 000000000..a8bd74214 --- /dev/null +++ b/packer/test-fixtures/validate-min-version.json @@ -0,0 +1,7 @@ +{ + "min_packer_version": "0.1.0", + + "builders": [ + {"type": "foo"} + ] +} From 782d6930759a7af1312fe9fc839aad46a8d845bb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 13:47:53 -0700 Subject: [PATCH 586/956] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d3434fa9..6517f1506 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,13 @@ ## 0.8.1 (unreleased) +IMPROVEMENTS: + + * builder/amazon: When debug mode is enabled, the Windows administrator + password for Windows instances will be shown [GH-2351] + BUG FIXES: + * core: `min_packer_version` field in configs work [GH-2356] * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] From 95f9391fd655bcfeedd7b23a8839209b34b8aaa7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 14:00:26 -0700 Subject: [PATCH 587/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6517f1506..f98d32f03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ BUG FIXES: regions [GH-2316] * builder/amazon: 
Fix setting AMI launch permissions [GH-2348] * builder/amazon: Fix spot instance cleanup to remove the correct request [GH-2327] + * builder/amazon: Fix `bundle_prefix` not interpolating `timestamp` [GH-2352] * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] From 61753d37cb4b4bf8ae3a2f9c67b25f20b5bf1939 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 29 Jun 2015 14:04:10 -0700 Subject: [PATCH 588/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f98d32f03..83daa8e83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ BUG FIXES: * builder/amazon: Fix `bundle_prefix` not interpolating `timestamp` [GH-2352] * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] + * builder/google: Set default communicator settings properly [GH-2353] * builder/vmware-iso: Setting `checksum_type` to `none` for ESX builds now works [GH-2323] * provisioner/chef: Use knife config file vs command-line params to clean up nodes so full set of features can be used [GH-2306] From 51804e91321c33920da289811a430be31f2e8058 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 29 Jun 2015 14:47:28 -0700 Subject: [PATCH 589/956] Find a public IP address to provision a digital ocean box --- builder/digitalocean/step_droplet_info.go | 25 ++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/builder/digitalocean/step_droplet_info.go b/builder/digitalocean/step_droplet_info.go index 81d84dc8d..abdc2b698 100644 --- a/builder/digitalocean/step_droplet_info.go +++ b/builder/digitalocean/step_droplet_info.go @@ -14,11 +14,11 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*godo.Client) ui := state.Get("ui").(packer.Ui) c := state.Get("config").(Config) - dropletId := state.Get("droplet_id").(int) + dropletID := state.Get("droplet_id").(int) ui.Say("Waiting for droplet to become active...") - err := waitForDropletState("active", dropletId, client, c.StateTimeout) + err := waitForDropletState("active", dropletID, client, c.StateTimeout) if err != nil { err := fmt.Errorf("Error waiting for droplet to become active: %s", err) state.Put("error", err) @@ -27,7 +27,7 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { } // Set the IP on the state for later - droplet, _, err := client.Droplets.Get(dropletId) + droplet, _, err := client.Droplets.Get(dropletID) if err != nil { err := fmt.Errorf("Error retrieving droplet: %s", err) state.Put("error", err) @@ -39,13 +39,28 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { invalid := droplet.Networks == nil || len(droplet.Networks.V4) == 0 if invalid { - err := fmt.Errorf("IPv4 address not found for droplet!") + err := fmt.Errorf("IPv4 address not found for droplet") state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } + + // Find a public IPv4 network + foundNetwork := false + for _, network := range droplet.Networks.V4 { + if network.Type == "public" { + state.Put("droplet_ip", network.IPAddress) + foundNetwork = true + break + } + } + if !foundNetwork { + err := fmt.Errorf("Could not find a public IPv4 address for this droplet") state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } - state.Put("droplet_ip", droplet.Networks.V4[0].IPAddress) return
multistep.ActionContinue } From 6c2e6c41d97f051a5db53fc9655e7098e3375659 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 29 Jun 2015 17:06:49 -0700 Subject: [PATCH 590/956] Added an example using tcsh with shell provisioner; reworded some of the execute_command docs --- .../docs/provisioners/shell.html.markdown | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index 75bf5e7d1..dec270841 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -89,10 +89,14 @@ Optional parameters: To many new users, the `execute_command` is puzzling. However, it provides an important function: customization of how the command is executed. The -most common use case for this is dealing with **sudo password prompts**. +most common use case for this is dealing with **sudo password prompts**. You may +also need to customize this if you use a non-POSIX shell, such as `tcsh` on +FreeBSD. -For example, if the default user of an installed operating system is "packer" -and has the password "packer" for sudo usage, then you'll likely want to +### Sudo Example + +Some operating systems default to a non-root user. For example if you login +as `ubuntu` and can sudo using the password `packer`, then you'll want to change `execute_command` to be: ```text ``` The `-S` flag tells `sudo` to read the password from stdin, which in this -case is being piped in with the value of "packer". The `-E` flag tells `sudo` +case is being piped in with the value of `packer`. The `-E` flag tells `sudo` to preserve the environment, allowing our environmental variables to work within the script. By setting the `execute_command` to this, your script(s) can run with root privileges without worrying about password prompts. +### FreeBSD Example + +FreeBSD's default shell is `tcsh`, which deviates from POSIX semantics. In order +for Packer to pass environment variables you will need to change the +`execute_command` to: + + chmod +x {{ .Path }}; env {{ .Vars }} {{ .Path }} + +Note the addition of `env` before `{{ .Vars }}`. + ## Default Environmental Variables In addition to being able to specify custom environmental variables using From d1961e782b364082706d11d4a70f4e4ec6937ad9 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 30 Jun 2015 08:16:31 -0400 Subject: [PATCH 591/956] Be more explicit about sequence definition usage in docker builder docs --- .../source/docs/builders/docker.html.markdown | 53 +++++++++++++++++-- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index c760742ee..b5fe95075 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -117,7 +117,10 @@ _exported_. More specifically, if you set `export_path` in your configuration. If you set `commit`, see the next section. The example below shows a full configuration that would import and push -the created image: +the created image.
This is accomplished using a sequence definition (a +collection of post-processors that are treated as a single pipeline, see +[Post-Processors](/docs/templates/post-processors.html) +for more information): ```javascript { @@ -134,6 +137,12 @@ the created image: } ``` +In the above example, the result of each builder is passed through the defined +sequence of post-processors starting first with the `docker-import` +post-processor which will import the artifact as a docker image. The resulting +docker image is then passed on to the `docker-push` post-processor which handles +pushing the image to a container repository. + If you want to do this manually, however, perhaps from a script, you can import the image using the process below: @@ -146,9 +155,12 @@ and `docker push`, respectively. ## Using the Artifact: Committed -If you committed your container to an image, you probably want to tag, -save, push, etc. Packer can do this automatically for you. An example is -shown below which tags and pushes the image: +If you committed your container to an image, you probably want to tag, save, +push, etc. Packer can do this automatically for you. An example is shown below +which tags and pushes an image. This is accomplished using a sequence +definition (a collection of post-processors that are treated as a single +pipeline, see [Post-Processors](/docs/templates/post-processors.html) for more +information): ```javascript { @@ -165,6 +177,39 @@ shown below which tags and pushes the image: } ``` +In the above example, the result of each builder is passed through the defined +sequence of post-processors starting first with the `docker-tag` post-processor +which tags the committed image with the supplied repository and tag information. +Once tagged, the resulting artifact is then passed on to the `docker-push` +post-processor which handles pushing the image to a container repository. + +Going a step further, if you wanted to tag and push an image to multiple +container repositories, this could be accomplished by defining two, +nearly-identical sequence definitions, as demonstrated by the example below: + +```javascript +{ + "post-processors": [ + [ + { + "type": "docker-tag", + "repository": "mitchellh/packer", + "tag": "0.7" + }, + "docker-push" + ], + [ + { + "type": "docker-tag", + "repository": "hashicorp/packer", + "tag": "0.7" + }, + "docker-push" + ] + ] +} +``` + ## Dockerfiles This builder allows you to build Docker images _without_ Dockerfiles.
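One more note on the sequence semantics documented above: a sequence is just an ordered pipeline in which each post-processor receives the previous artifact and returns a new one. The sketch below is a deliberately simplified model — Packer's real `PostProcessor.PostProcess` also takes a `packer.Ui` and returns a keep-artifact flag alongside the new artifact — but it shows the data flow the docs describe:

```go
package main

import "fmt"

// Simplified stand-ins for Packer's plugin interfaces.
type Artifact interface{ Id() string }

type PostProcessor interface {
	PostProcess(Artifact) (Artifact, error)
}

// image is a toy artifact; step derives a new artifact from its input,
// the way docker-tag or docker-push derive one from the committed image.
type image struct{ id string }

func (i image) Id() string { return i.id }

type step struct{ suffix string }

func (s step) PostProcess(a Artifact) (Artifact, error) {
	return image{a.Id() + s.suffix}, nil
}

// runSequence pipes one builder's artifact through the sequence in order.
func runSequence(a Artifact, seq []PostProcessor) (Artifact, error) {
	var err error
	for _, pp := range seq {
		if a, err = pp.PostProcess(a); err != nil {
			return nil, err
		}
	}
	return a, nil
}

func main() {
	out, _ := runSequence(image{"mitchellh/packer"}, []PostProcessor{
		step{":0.7"}, step{":pushed"},
	})
	fmt.Println(out.Id()) // mitchellh/packer:0.7:pushed
}
```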
From fd2d44c212474ab516b47800bdc0f8d4889b2708 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 30 Jun 2015 10:42:55 -0700 Subject: [PATCH 592/956] helper/config: copy buildname/buildtype properly --- helper/config/decode.go | 2 ++ template/interpolate/funcs.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/helper/config/decode.go b/helper/config/decode.go index 177f8f930..620198e35 100644 --- a/helper/config/decode.go +++ b/helper/config/decode.go @@ -42,6 +42,8 @@ func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error { if config.InterpolateContext == nil { config.InterpolateContext = ctx } else { + config.InterpolateContext.BuildName = ctx.BuildName + config.InterpolateContext.BuildType = ctx.BuildType config.InterpolateContext.TemplatePath = ctx.TemplatePath config.InterpolateContext.UserVariables = ctx.UserVariables } diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go index e5d01b455..75e1344ed 100644 --- a/template/interpolate/funcs.go +++ b/template/interpolate/funcs.go @@ -71,7 +71,7 @@ func funcGenBuildName(ctx *Context) interface{} { func funcGenBuildType(ctx *Context) interface{} { return func() (string, error) { if ctx == nil || ctx.BuildType == "" { - return "", errors.New("build_name not available") + return "", errors.New("build_type not available") } return ctx.BuildType, nil From ac444accb122e343ba6576f6e3e5c4dc038a8cec Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 30 Jun 2015 10:44:56 -0700 Subject: [PATCH 593/956] helper/config: tests --- helper/config/decode_test.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/helper/config/decode_test.go b/helper/config/decode_test.go index f9fa590c9..f325ae4a5 100644 --- a/helper/config/decode_test.go +++ b/helper/config/decode_test.go @@ -74,6 +74,36 @@ func TestDecode(t *testing.T) { }, }, }, + + "build name": { + []interface{}{ + map[string]interface{}{ + "name": "{{build_name}}", + }, + map[string]interface{}{ + "packer_build_name": "foo", + }, + }, + &Target{ + Name: "foo", + }, + nil, + }, + + "build type": { + []interface{}{ + map[string]interface{}{ + "name": "{{build_type}}", + }, + map[string]interface{}{ + "packer_builder_type": "foo", + }, + }, + &Target{ + Name: "foo", + }, + nil, + }, } for k, tc := range cases { From 3abec94936804ca8c51710a7908a7d673b6b0966 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 30 Jun 2015 10:46:14 -0700 Subject: [PATCH 594/956] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 83daa8e83..b6f73f227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS: BUG FIXES: * core: `min_packer_version` field in configs work [GH-2356] + * core: The `build_name` and `build_type` functions work in provisioners [GH-2367] * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] From 548f2ced1c4ebf6741513e874ae6919cadee8f51 Mon Sep 17 00:00:00 2001 From: Zach Zolton Date: Tue, 30 Jun 2015 17:56:14 +0000 Subject: [PATCH 595/956] Fix `packer build` reading from STDIN Signed-off-by: Jesse Szwedko --- command/build.go | 8 +++++++- command/build_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/command/build.go b/command/build.go index 
7a035a883..6d2fef67c 100644 --- a/command/build.go +++ b/command/build.go @@ -37,7 +37,13 @@ func (c BuildCommand) Run(args []string) int { } // Parse the template - tpl, err := template.ParseFile(args[0]) + var tpl *template.Template + var err error + if args[0] == "-" { + tpl, err = template.Parse(os.Stdin) + } else { + tpl, err = template.ParseFile(args[0]) + } if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 diff --git a/command/build_test.go b/command/build_test.go index 73837e1a2..aa35a385f 100644 --- a/command/build_test.go +++ b/command/build_test.go @@ -37,6 +37,36 @@ func TestBuildOnlyFileCommaFlags(t *testing.T) { } } +func TestBuildStdin(t *testing.T) { + c := &BuildCommand{ + Meta: testMetaFile(t), + } + f, err := os.Open(filepath.Join(testFixture("build-only"), "template.json")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + stdin := os.Stdin + os.Stdin = f + defer func() { os.Stdin = stdin }() + + defer cleanup() + if code := c.Run([]string{"-"}); code != 0 { + fatalCommand(t, c.Meta) + } + + if !fileExists("chocolate.txt") { + t.Error("Expected to find chocolate.txt") + } + if !fileExists("vanilla.txt") { + t.Error("Expected to find vanilla.txt") + } + if !fileExists("cherry.txt") { + t.Error("Expected to find cherry.txt") + } +} + func TestBuildOnlyFileMultipleFlags(t *testing.T) { c := &BuildCommand{ Meta: testMetaFile(t), From ebd4cb761ddd4aecfe9516b1a5798f716663610b Mon Sep 17 00:00:00 2001 From: Christian Mayer Date: Wed, 1 Jul 2015 10:46:47 +0200 Subject: [PATCH 596/956] Fix NULL builder config documentation. Change from "host" to "ssh_host" was introduced in d545431f9bc735f --- website/source/docs/builders/null.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/null.html.markdown b/website/source/docs/builders/null.html.markdown index 74c4465fb..7398cadd7 100644 --- a/website/source/docs/builders/null.html.markdown +++ b/website/source/docs/builders/null.html.markdown @@ -20,8 +20,8 @@ no provisioners are defined, but it will connect to the specified host via ssh. 
```javascript { - "type": "null", - "host": "127.0.0.1", + "type": "null", + "ssh_host": "127.0.0.1", "ssh_username": "foo", "ssh_password": "bar" } From 5e1ea753d4fa7fe4c463557d3543a6c6ac7292bc Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:30:39 -0700 Subject: [PATCH 597/956] Fixed case sensitive issue with VMX entries not being overwritten --- builder/vmware/common/step_clean_vmx.go | 4 ++-- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx.go mode change 100644 => 100755 builder/vmware/vmx/step_clone_vmx.go diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go old mode 100644 new mode 100755 index bf76f5863..44bf4c407 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -51,8 +51,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"devicetype"] = "cdrom-raw" - vmxData[ide+"filename"] = "auto detect" + vmxData[ide+"deviceType"] = "cdrom-raw" + vmxData[ide+"fileName"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go old mode 100644 new mode 100755 index a020e1627..1dbae678a --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName string - if _, ok := vmxData["scsi0:0.filename"]; ok { - diskName = vmxData["scsi0:0.filename"] + if _, ok := vmxData["scsi0:0.fileName"]; ok { + diskName = vmxData["scsi0:0.fileName"] } - if _, ok := vmxData["sata0:0.filename"]; ok { - diskName = vmxData["sata0:0.filename"] + if _, ok := vmxData["sata0:0.fileName"]; ok { + diskName = vmxData["sata0:0.fileName"] } - if _, ok := vmxData["ide0:0.filename"]; ok { - diskName = vmxData["ide0:0.filename"] + if _, ok := vmxData["ide0:0.fileName"]; ok { + diskName = vmxData["ide0:0.fileName"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not be found!") From ded78d2bc265881572ffc6b52bfc55e69e2420a9 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:45:10 -0700 Subject: [PATCH 598/956] fixed the test as well --- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx_test.go diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go old mode 100644 new mode 100755 index ea30fb54a..3ca6a7e23 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.filetype", ""}, - {"floppy0.filename", ""}, + {"floppy0.fileType", ""}, + {"floppy0.fileName", ""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.filename", "auto detect"}, - {"ide0:0.devicetype", "cdrom-raw"}, - {"ide0:1.filename", "bar"}, + {"ide0:0.fileName", "auto detect"}, + {"ide0:0.deviceType", "cdrom-raw"}, + {"ide0:1.fileName", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present 
= "TRUE" -floppy0.filetype = "file" +floppy0.fileType = "file" ` const testVMXISOPath = ` -ide0:0.devicetype = "cdrom-image" -ide0:0.filename = "foo" -ide0:1.filename = "bar" +ide0:0.deviceType = "cdrom-image" +ide0:0.fileName = "foo" +ide0:1.fileName = "bar" foo = "bar" ` From 1d7d490c01ae9a199ab848940f6f7857445cc5fa Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 24 Jun 2015 11:58:00 -0700 Subject: [PATCH 599/956] updated how vmx entries are handled --- builder/vmware/common/step_configure_vmx.go | 2 -- builder/vmware/common/vmx.go | 20 ++++++++++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_configure_vmx.go mode change 100644 => 100755 builder/vmware/common/vmx.go diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go old mode 100644 new mode 100755 index 401d53055..14c68e76a --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "log" "regexp" - "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -53,7 +52,6 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) - k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go old mode 100644 new mode 100755 index e7cdb662f..ab0291807 --- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,8 +25,7 @@ func ParseVMX(contents string) map[string]string { continue } - key := strings.ToLower(matches[1]) - results[key] = matches[2] + results[matches[1]] = matches[2] } return results @@ -43,9 +42,22 @@ func EncodeVMX(contents map[string]string) string { i++ } + // a list of VMX key fragments that should not be wrapped in quotes, + // fragments because multiple disks can use the virtualSSD suffix + noQuotes := []string { + "virtualSSD", + } + sort.Strings(keys) for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) + pat := "%s = \"%s\"\n" + for _, q := range noQuotes { + if strings.Contains(k, q) { + pat = "%s = %s\n" + break; + } + } + buf.WriteString(fmt.Sprintf(pat, k, contents[k])) } return buf.String() From df1be999dcc6a7861ce19ec7c06e99022c1db6e6 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:30:39 -0700 Subject: [PATCH 600/956] Fixed case sensitive issue with VMX entries not being overwritten --- builder/vmware/common/step_clean_vmx.go | 4 ++-- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx.go mode change 100644 => 100755 builder/vmware/vmx/step_clone_vmx.go diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go old mode 100644 new mode 100755 index bf76f5863..44bf4c407 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -51,8 +51,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { 
ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"devicetype"] = "cdrom-raw" - vmxData[ide+"filename"] = "auto detect" + vmxData[ide+"deviceType"] = "cdrom-raw" + vmxData[ide+"fileName"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go old mode 100644 new mode 100755 index a020e1627..1dbae678a --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName string - if _, ok := vmxData["scsi0:0.filename"]; ok { - diskName = vmxData["scsi0:0.filename"] + if _, ok := vmxData["scsi0:0.fileName"]; ok { + diskName = vmxData["scsi0:0.fileName"] } - if _, ok := vmxData["sata0:0.filename"]; ok { - diskName = vmxData["sata0:0.filename"] + if _, ok := vmxData["sata0:0.fileName"]; ok { + diskName = vmxData["sata0:0.fileName"] } - if _, ok := vmxData["ide0:0.filename"]; ok { - diskName = vmxData["ide0:0.filename"] + if _, ok := vmxData["ide0:0.fileName"]; ok { + diskName = vmxData["ide0:0.fileName"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not be found!") From e9ef2b987eca789b65a306a902f2af54be090e17 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Wed, 1 Jul 2015 11:45:10 -0700 Subject: [PATCH 601/956] fixed the test as well --- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) mode change 100644 => 100755 builder/vmware/common/step_clean_vmx_test.go diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go old mode 100644 new mode 100755 index ea30fb54a..3ca6a7e23 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.filetype", ""}, - {"floppy0.filename", ""}, + {"floppy0.fileType", ""}, + {"floppy0.fileName", ""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.filename", "auto detect"}, - {"ide0:0.devicetype", "cdrom-raw"}, - {"ide0:1.filename", "bar"}, + {"ide0:0.fileName", "auto detect"}, + {"ide0:0.deviceType", "cdrom-raw"}, + {"ide0:1.fileName", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present = "TRUE" -floppy0.filetype = "file" +floppy0.fileType = "file" ` const testVMXISOPath = ` -ide0:0.devicetype = "cdrom-image" -ide0:0.filename = "foo" -ide0:1.filename = "bar" +ide0:0.deviceType = "cdrom-image" +ide0:0.fileName = "foo" +ide0:1.fileName = "bar" foo = "bar" ` From 03850cafc646600d00b2e038ef81286f9e2fe392 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 2 Jul 2015 03:40:47 -0700 Subject: [PATCH 602/956] Implemented timeout around the SSH handshake, including a unit test --- communicator/ssh/communicator.go | 46 +++++++++++++++-- communicator/ssh/communicator_test.go | 72 ++++++++++++++++++++++++--- 2 files changed, 107 insertions(+), 11 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index cc61e8e9f..f05f6e46e 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -5,9 +5,6 @@ import ( "bytes" "errors" "fmt" - "github.com/mitchellh/packer/packer" - "golang.org/x/crypto/ssh" - 
"golang.org/x/crypto/ssh/agent" "io" "io/ioutil" "log" @@ -16,8 +13,15 @@ import ( "path/filepath" "strconv" "sync" + "time" + + "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" ) +var ErrHandshakeTimeout = fmt.Errorf("Timeout during SSH handshake") + type comm struct { client *ssh.Client config *Config @@ -40,6 +44,10 @@ type Config struct { // DisableAgent, if true, will not forward the SSH agent. DisableAgent bool + + // HandshakeTimeout limits the amount of time we'll wait to handshake before + // saying the connection failed. + HandshakeTimeout time.Duration } // Creates a new packer.Communicator implementation over SSH. This takes @@ -273,9 +281,39 @@ func (c *comm) reconnect() (err error) { } log.Printf("handshaking with SSH") - sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) + + // Default timeout to 1 minute if it wasn't specified (zero value). For + // when you need to handshake from low orbit. + var duration time.Duration + if c.config.HandshakeTimeout == 0 { + duration = 1 * time.Minute + } else { + duration = c.config.HandshakeTimeout + } + + timeoutExceeded := time.After(duration) + connectionEstablished := make(chan bool, 1) + + var sshConn ssh.Conn + var sshChan <-chan ssh.NewChannel + var req <-chan *ssh.Request + + go func() { + sshConn, sshChan, req, err = ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) + connectionEstablished <- true + }() + + select { + case <-connectionEstablished: + // We don't need to do anything here. We just want select to block until + // we connect or timeout. + case <-timeoutExceeded: + return ErrHandshakeTimeout + } + if err != nil { log.Printf("handshake error: %s", err) + return } log.Printf("handshake complete!") if sshConn != nil { diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go index e9f73d2dc..6398bd713 100644 --- a/communicator/ssh/communicator_test.go +++ b/communicator/ssh/communicator_test.go @@ -5,10 +5,12 @@ package ssh import ( "bytes" "fmt" - "github.com/mitchellh/packer/packer" - "golang.org/x/crypto/ssh" "net" "testing" + "time" + + "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" ) // private key for mock server @@ -94,6 +96,28 @@ func newMockLineServer(t *testing.T) string { return l.Addr().String() } +func newMockBrokenServer(t *testing.T) string { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Unable tp listen for connection: %s", err) + } + + go func() { + defer l.Close() + c, err := l.Accept() + if err != nil { + t.Errorf("Unable to accept incoming connection: %s", err) + } + defer c.Close() + // This should block for a period of time longer than our timeout in + // the test case. That way we invoke a failure scenario. 
+ time.Sleep(5 * time.Second) + t.Log("Block on handshaking for SSH connection") + }() + + return l.Addr().String() +} + func TestCommIsCommunicator(t *testing.T) { var raw interface{} raw = &comm{} @@ -157,10 +181,44 @@ func TestStart(t *testing.T) { t.Fatalf("error connecting to SSH: %s", err) } - var cmd packer.RemoteCmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout + cmd := &packer.RemoteCmd{ + Command: "echo foo", + Stdout: new(bytes.Buffer), + } - client.Start(&cmd) + client.Start(cmd) +} + +func TestHandshakeTimeout(t *testing.T) { + clientConfig := &ssh.ClientConfig{ + User: "user", + Auth: []ssh.AuthMethod{ + ssh.Password("pass"), + }, + } + + address := newMockBrokenServer(t) + conn := func() (net.Conn, error) { + conn, err := net.Dial("tcp", address) + if err != nil { + t.Fatalf("unable to dial to remote side: %s", err) + } + return conn, err + } + + config := &Config{ + Connection: conn, + SSHConfig: clientConfig, + HandshakeTimeout: 50 * time.Millisecond, + } + + _, err := New(address, config) + if err != ErrHandshakeTimeout { + // Note: there's another error that can come back from this call: + // ssh: handshake failed: EOF + // This should appear in cases where the handshake fails because of + // malformed (or no) data sent back by the server, but should not happen + // in a timeout scenario. + t.Fatalf("Expected handshake timeout, got: %s", err) + } } From 5dd8ae45c70876dc478af0eee856ff9ac4845f5b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 2 Jul 2015 03:55:18 -0700 Subject: [PATCH 603/956] Cleanup some resources we may have created --- communicator/ssh/communicator.go | 6 ++++++ communicator/ssh/communicator_test.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index f05f6e46e..d1be89e64 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -308,6 +308,12 @@ func (c *comm) reconnect() (err error) { // We don't need to do anything here. We just want select to block until // we connect or timeout. case <-timeoutExceeded: + if c.conn != nil { + c.conn.Close() + } + if sshConn != nil { + sshConn.Close() + } return ErrHandshakeTimeout } diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go index 6398bd713..b0bc03508 100644 --- a/communicator/ssh/communicator_test.go +++ b/communicator/ssh/communicator_test.go @@ -111,8 +111,8 @@ func newMockBrokenServer(t *testing.T) string { defer c.Close() // This should block for a period of time longer than our timeout in // the test case. That way we invoke a failure scenario. - time.Sleep(5 * time.Second) t.Log("Block on handshaking for SSH connection") + time.Sleep(5 * time.Second) }() return l.Addr().String() From 0869c9fe6479a4dcae0921f2c7ac3184455c4d9f Mon Sep 17 00:00:00 2001 From: Steven Merrill Date: Thu, 2 Jul 2015 10:23:44 -0400 Subject: [PATCH 604/956] Fix #2830. 
--- builder/amazon/common/step_run_source_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index 09b053901..b94a6031c 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -43,7 +43,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi securityGroupIds := make([]*string, len(tempSecurityGroupIds)) for i, sg := range tempSecurityGroupIds { - securityGroupIds[i] = &sg + securityGroupIds[i] = aws.String(sg) } userData := s.UserData From 67a1703768be7b8a0ba99203cd40b5312401eb27 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 2 Jul 2015 09:36:28 -0500 Subject: [PATCH 605/956] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b6f73f227..e76203cd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ BUG FIXES: * core: `min_packer_version` field in configs work [GH-2356] * core: The `build_name` and `build_type` functions work in provisioners [GH-2367] * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] + * builder/amazon: Fix issue when using multiple Security Groups [GH-2381] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] * builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320] * builder/amazon: Fix failing AMI snapshot tagging when copying to other From 0ca03f09c1f5b7709c3be8101710556293aa9e60 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 2 Jul 2015 11:58:51 -0700 Subject: [PATCH 606/956] Fix some style issues and add a doc to ErrHandshakeTimeout --- communicator/ssh/communicator.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index d1be89e64..0b2acd8f8 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -20,6 +20,9 @@ import ( "golang.org/x/crypto/ssh/agent" ) +// ErrHandshakeTimeout is returned from New() whenever we're unable to establish +// an ssh connection within a certain timeframe. By default the handshake time- +// out period is 1 minute. You can change it with Config.HandshakeTimeout. var ErrHandshakeTimeout = fmt.Errorf("Timeout during SSH handshake") type comm struct { @@ -291,8 +294,7 @@ func (c *comm) reconnect() (err error) { duration = c.config.HandshakeTimeout } - timeoutExceeded := time.After(duration) - connectionEstablished := make(chan bool, 1) + connectionEstablished := make(chan struct{}, 1) var sshConn ssh.Conn var sshChan <-chan ssh.NewChannel @@ -300,14 +302,14 @@ func (c *comm) reconnect() (err error) { go func() { sshConn, sshChan, req, err = ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) - connectionEstablished <- true + close(connectionEstablished) }() select { case <-connectionEstablished: // We don't need to do anything here. We just want select to block until // we connect or timeout. 
- case <-timeoutExceeded: + case <-time.After(duration): if c.conn != nil { c.conn.Close() } From 872e78d5b0a387eb3b87ddeef210264c3199d178 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 2 Jul 2015 14:14:56 -0700 Subject: [PATCH 607/956] v0.8.1 --- CHANGELOG.md | 5 ++++- version.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e76203cd1..9a7edff10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.8.1 (unreleased) +## 0.8.1 (July 2, 2015) IMPROVEMENTS: @@ -9,6 +9,8 @@ BUG FIXES: * core: `min_packer_version` field in configs work [GH-2356] * core: The `build_name` and `build_type` functions work in provisioners [GH-2367] + * core: Handle timeout in SSH handshake [GH-2333] + * command/build: Fix reading configuration from stdin [GH-2366] * builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308] * builder/amazon: Fix issue when using multiple Security Groups [GH-2381] * builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317] @@ -20,6 +22,7 @@ BUG FIXES: * builder/amazon: Fix `bundle_prefix` not interpolating `timestamp` [GH-2352] * builder/amazon-instance: Fix issue with creating AMIs without specifying a virtualization type [GH-2330] + * builder/digitalocean: Fix builder using private IP instead of public IP [GH-2339] * builder/google: Set default communicator settings properly [GH-2353] * builder/vmware-iso: Setting `checksum_type` to `none` for ESX builds now works [GH-2323] diff --git a/version.go b/version.go index e4b31afba..c2f69582d 100644 --- a/version.go +++ b/version.go @@ -9,4 +9,4 @@ const Version = "0.8.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From c416f6e14baa923e8a694eca1f7e1eaffd96f238 Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Thu, 2 Jul 2015 17:37:03 -0400 Subject: [PATCH 608/956] Remove ssh_wait_timeout from doc basic examples for virtualbox/vmware builders. Fixes GH-2382. --- website/source/docs/builders/virtualbox-iso.html.markdown | 1 - website/source/docs/builders/virtualbox-ovf.html.markdown | 1 - website/source/docs/builders/vmware-iso.html.markdown | 1 - 3 files changed, 3 deletions(-) diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 616b0a8e4..353710720 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -33,7 +33,6 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio "iso_checksum_type": "md5", "ssh_username": "packer", "ssh_password": "packer", - "ssh_wait_timeout": "30s", "shutdown_command": "echo 'packer' | sudo -S shutdown -P now" } ``` diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index db5247738..693e53e68 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -39,7 +39,6 @@ the settings here. 
"source_path": "source.ovf", "ssh_username": "packer", "ssh_password": "packer", - "ssh_wait_timeout": "30s", "shutdown_command": "echo 'packer' | sudo -S shutdown -P now" } ``` diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 2bb3a402f..8ac3a9fd3 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -38,7 +38,6 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio "iso_checksum": "af5f788aee1b32c4b2634734309cc9e9", "iso_checksum_type": "md5", "ssh_username": "packer", - "ssh_wait_timeout": "30s", "shutdown_command": "shutdown -P now" } ``` From ed463bccda9247021f47ca61050d1f36226ecb05 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 6 Jul 2015 14:17:33 -0600 Subject: [PATCH 609/956] Added post-processor crash fix to the changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a7edff10..1b58cda75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ BUG FIXES: now works [GH-2323] * provisioner/chef: Use knife config file vs command-line params to clean up nodes so full set of features can be used [GH-2306] + * post-processor/compress: Fixed crash in compress post-processor plugin [GH-2311] ## 0.8.0 (June 23, 2015) From 98e7f2a3412ed5cfbf6d2caf2f8523290800a3a8 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 6 Jul 2015 14:53:03 -0600 Subject: [PATCH 610/956] Added a footer link to edit the docs --- website/source/layouts/layout.erb | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 192b7303d..26a1dac6b 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -51,12 +51,15 @@
From 0cee01261cb7142bd652f42710e8dd9f4ff291eb Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 6 Jul 2015 14:53:17 -0600 Subject: [PATCH 611/956] Added a makefile for the docs repo --- website/Makefile | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 website/Makefile diff --git a/website/Makefile b/website/Makefile new file mode 100644 index 000000000..9888cfa82 --- /dev/null +++ b/website/Makefile @@ -0,0 +1,10 @@ +all: build + +init: + bundle + +dev: init + PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman server + +build: init + PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman build From cc9549eac8c5e5053ccc73b3998eb4ba5e445104 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 6 Jul 2015 14:54:30 -0600 Subject: [PATCH 612/956] Update README to recommend the makefile --- website/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/README.md b/website/README.md index d661265d9..881362f5a 100644 --- a/website/README.md +++ b/website/README.md @@ -16,8 +16,7 @@ Running the site locally is simple. Clone this repo and run the following commands: ``` -$ bundle -$ PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman server +make dev ``` Then open up `localhost:4567`. Note that some URLs you may need to append From 6d6b3e1ac2ae03546269744d788f5e631fb0f810 Mon Sep 17 00:00:00 2001 From: Samit Pal Date: Mon, 6 Jul 2015 09:32:08 +0000 Subject: [PATCH 613/956] The default image name in the code has a bug: it is set to packer-{{timestamp}}, but the {{timestamp}} part is never interpolated. Without the interpolation the GCE builder fails with the following error: ==> googlecompute: Creating image... ==> googlecompute: Error waiting for image: googleapi: Error 400: Invalid value for field 'resource.name': 'packer-{{timestamp}}'. 
Must be a match of regex '(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)', invalid --- builder/googlecompute/config.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index eebea011f..dc049aa1c 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -73,7 +73,13 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } if c.ImageName == "" { - c.ImageName = "packer-{{timestamp}}" + img, err := interpolate.Render("packer-{{timestamp}}", nil) + if err != nil { + panic(err) + } + + // Default to packer-{{ unix timestamp (utc) }} + c.ImageName = img } if c.InstanceName == "" { From 26aa3dd575aadbd14e13e321d45d87d9dfecd482 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 7 Jul 2015 11:07:38 -0600 Subject: [PATCH 614/956] amazon/common: store instance ID earlier for cleanup --- .../amazon/common/step_run_source_instance.go | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index b94a6031c..b3bb6744f 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -31,7 +31,7 @@ type StepRunSourceInstance struct { UserData string UserDataFile string - instance *ec2.Instance + instanceId string spotRequest *ec2.SpotInstanceRequest } @@ -235,6 +235,9 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi instanceId = *spotResp.SpotInstanceRequests[0].InstanceID } + // Set the instance ID so that the cleanup works properly + s.instanceId = instanceId + ui.Message(fmt.Sprintf("Instance ID: %s", instanceId)) ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId)) stateChange := StateChangeConf{ @@ -251,7 +254,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi return multistep.ActionHalt } - s.instance = latestInstance.(*ec2.Instance) + instance := latestInstance.(*ec2.Instance) ec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1) ec2Tags[0] = &ec2.Tag{Key: aws.String("Name"), Value: aws.String("Packer Builder")} @@ -261,7 +264,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ Tags: ec2Tags, - Resources: []*string{s.instance.InstanceID}, + Resources: []*string{instance.InstanceID}, }) if err != nil { ui.Message( @@ -269,20 +272,20 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } if s.Debug { - if s.instance.PublicDNSName != nil && *s.instance.PublicDNSName != "" { - ui.Message(fmt.Sprintf("Public DNS: %s", *s.instance.PublicDNSName)) + if instance.PublicDNSName != nil && *instance.PublicDNSName != "" { + ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDNSName)) } - if s.instance.PublicIPAddress != nil && *s.instance.PublicIPAddress != "" { - ui.Message(fmt.Sprintf("Public IP: %s", *s.instance.PublicIPAddress)) + if instance.PublicIPAddress != nil && *instance.PublicIPAddress != "" { + ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIPAddress)) } - if s.instance.PrivateIPAddress != nil && *s.instance.PrivateIPAddress != "" { - ui.Message(fmt.Sprintf("Private IP: %s", *s.instance.PrivateIPAddress)) + if instance.PrivateIPAddress != nil && *instance.PrivateIPAddress != "" { + ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIPAddress)) } } - 
state.Put("instance", s.instance) + state.Put("instance", instance) return multistep.ActionContinue } @@ -313,16 +316,15 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { } // Terminate the source instance if it exists - if s.instance != nil { - + if s.instanceId != "" { ui.Say("Terminating the source AWS instance...") - if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{s.instance.InstanceID}}); err != nil { + if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{&s.instanceId}}); err != nil { ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err)) return } stateChange := StateChangeConf{ Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, - Refresh: InstanceStateRefreshFunc(ec2conn, *s.instance.InstanceID), + Refresh: InstanceStateRefreshFunc(ec2conn, s.instanceId), Target: "terminated", } From 750e09d51d0a3535e5d01c1926b28b1dc7a278b9 Mon Sep 17 00:00:00 2001 From: Sunil K Chopra Date: Tue, 7 Jul 2015 17:04:27 -0500 Subject: [PATCH 615/956] should be ssh_host, not host --- builder/null/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/null/config.go b/builder/null/config.go index a6a12332e..c207f8087 100644 --- a/builder/null/config.go +++ b/builder/null/config.go @@ -33,7 +33,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } if c.CommConfig.SSHHost == "" { errs = packer.MultiErrorAppend(errs, - fmt.Errorf("host must be specified")) + fmt.Errorf("ssh_host must be specified")) } if c.CommConfig.SSHUsername == "" { From bd6c31c2d902822909b6a4c6cc94e689ebdc7a41 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 7 Jul 2015 16:18:31 -0600 Subject: [PATCH 616/956] Added TestImageName and moved private methods to the bottom of the file --- builder/googlecompute/config_test.go | 88 ++++++++++++++++------------ 1 file changed, 49 insertions(+), 39 deletions(-) diff --git a/builder/googlecompute/config_test.go b/builder/googlecompute/config_test.go index 93997912e..581c1425b 100644 --- a/builder/googlecompute/config_test.go +++ b/builder/googlecompute/config_test.go @@ -2,48 +2,10 @@ package googlecompute import ( "io/ioutil" + "strings" "testing" ) -func testConfig(t *testing.T) map[string]interface{} { - return map[string]interface{}{ - "account_file": testAccountFile(t), - "project_id": "hashicorp", - "source_image": "foo", - "zone": "us-east-1a", - } -} - -func testConfigStruct(t *testing.T) *Config { - c, warns, errs := NewConfig(testConfig(t)) - if len(warns) > 0 { - t.Fatalf("bad: %#v", len(warns)) - } - if errs != nil { - t.Fatalf("bad: %#v", errs) - } - - return c -} - -func testConfigErr(t *testing.T, warns []string, err error, extra string) { - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatalf("should error: %s", extra) - } -} - -func testConfigOk(t *testing.T, warns []string, err error) { - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("bad: %s", err) - } -} - func TestConfigPrepare(t *testing.T) { cases := []struct { Key string @@ -181,6 +143,54 @@ func TestConfigDefaults(t *testing.T) { } } +func TestImageName(t *testing.T) { + c, _, _ := NewConfig(testConfig(t)) + if strings.Contains(c.ImageName, "{{timestamp}}") { + t.Errorf("ImageName should be interpolated; found %s", c.ImageName) + } +} + +// Helper stuff below + +func testConfig(t *testing.T) map[string]interface{} { + return 
map[string]interface{}{ + "account_file": testAccountFile(t), + "project_id": "hashicorp", + "source_image": "foo", + "zone": "us-east-1a", + } +} + +func testConfigStruct(t *testing.T) *Config { + c, warns, errs := NewConfig(testConfig(t)) + if len(warns) > 0 { + t.Fatalf("bad: %#v", len(warns)) + } + if errs != nil { + t.Fatalf("bad: %#v", errs) + } + + return c +} + +func testConfigErr(t *testing.T, warns []string, err error, extra string) { + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatalf("should error: %s", extra) + } +} + +func testConfigOk(t *testing.T, warns []string, err error) { + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("bad: %s", err) + } +} + func testAccountFile(t *testing.T) string { tf, err := ioutil.TempFile("", "packer") if err != nil { From 1c71eaaa91cf43f24eac59d412907c0bcd58cc37 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 7 Jul 2015 17:12:21 -0600 Subject: [PATCH 617/956] Change panic to multierror --- builder/googlecompute/config.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index dc049aa1c..317d64ace 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -59,6 +59,8 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { return nil, nil, err } + var errs *packer.MultiError + // Set defaults. if c.Network == "" { c.Network = "default" @@ -75,11 +77,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { if c.ImageName == "" { img, err := interpolate.Render("packer-{{timestamp}}", nil) if err != nil { - panic(err) + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Unable to parse image name: %s ", err)) + c.ImageName = img } - - // Default to packer-{{ unix timestamp (utc) }} - c.ImageName = img } if c.InstanceName == "" { @@ -102,7 +103,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.Comm.SSHUsername = "root" } - var errs *packer.MultiError if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) 
} From 1f6137e6368b3e6447121bff24c0066433ad730f Mon Sep 17 00:00:00 2001 From: Andrew Dahl Date: Wed, 8 Jul 2015 16:55:25 -0500 Subject: [PATCH 618/956] Add 1/10th second delay between key events to VNC for QEMU --- builder/qemu/step_type_boot_command.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index e42903f55..13c8622ed 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -177,7 +177,9 @@ func vncSendString(c *vnc.ClientConn, original string) { } c.KeyEvent(keyCode, true) + time.Sleep(time.Second/10) c.KeyEvent(keyCode, false) + time.Sleep(time.Second/10) if keyShift { c.KeyEvent(KeyLeftShift, false) From 315ad5483460e86a4c3f6b216e040c78f1195c76 Mon Sep 17 00:00:00 2001 From: YuZakuro Date: Thu, 9 Jul 2015 17:40:34 +0900 Subject: [PATCH 619/956] Add description of force option for docker-tag --- website/source/docs/post-processors/docker-tag.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/post-processors/docker-tag.html.markdown b/website/source/docs/post-processors/docker-tag.html.markdown index 7b8b92722..d3925d1fa 100644 --- a/website/source/docs/post-processors/docker-tag.html.markdown +++ b/website/source/docs/post-processors/docker-tag.html.markdown @@ -30,6 +30,9 @@ a repository is required. * `tag` (string) - The tag for the image. By default this is not set. +* `force` (boolean) - If true, this post-processor forcibly tags the image + even if the tag name already exists. Defaults to `false`. + ## Example An example is shown below, showing only the post-processor configuration: From a6269671ecd7e8b75deb8a4e2554d9e726c649c0 Mon Sep 17 00:00:00 2001 From: YuZakuro Date: Thu, 9 Jul 2015 18:19:18 +0900 Subject: [PATCH 620/956] Fix inconsistent spelling `(boolean)` is used in most cases, but `(bool)` is used in places. I ran `find website/source/**/*.(markdown|md) | xargs sed -i 's/(bool)/(boolean)/g'` --- website/source/docs/builders/amazon-ebs.html.markdown | 2 +- website/source/docs/builders/amazon-instance.html.markdown | 2 +- website/source/docs/builders/virtualbox-iso.html.markdown | 2 +- website/source/docs/builders/virtualbox-ovf.html.markdown | 2 +- website/source/docs/post-processors/compress.html.markdown | 2 +- website/source/docs/provisioners/puppet-server.html.markdown | 2 +- website/source/docs/templates/communicator.html.md | 4 ++-- website/source/docs/templates/push.html.markdown | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 25a06a957..69a9a5c04 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -158,7 +158,7 @@ AMI if one with the same name already exists. Default `false`. generate a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_private_ip` (boolean) - If true, then SSH will always use the private +* `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP if available. 
* `subnet_id` (string) - If using VPC, the ID of the subnet, such as diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index f670e4b66..fa3c8a190 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -197,7 +197,7 @@ AMI if one with the same name already exists. Default `false`. generate a temporary keypair. `ssh_private_key_file` must be specified with this. -* `ssh_private_ip` (bool) - If true, then SSH will always use the private +* `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP if available. * `subnet_id` (string) - If using VPC, the ID of the subnet, such as diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 353710720..97ba056f8 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -199,7 +199,7 @@ can be configured for this builder. Packer will choose a randomly available port in this range to use as the host port. -* `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does +* `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does not setup forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index 693e53e68..0a4516d02 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -174,7 +174,7 @@ can be configured for this builder. Packer will choose a randomly available port in this range to use as the host port. -* `ssh_skip_nat_mapping` (bool) - Defaults to false. When enabled, Packer does +* `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does not setup forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 8fcd81ee3..e6a1237e9 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -34,7 +34,7 @@ If you want more control over how the archive is created you can specify the fol that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Defaults to `6` -* `keep_input_artifact` (bool) - Keep source files; defaults to `false` +* `keep_input_artifact` (boolean) - Keep source files; defaults to `false` ### Supported Formats diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown index 1cbc9c2d4..803ae22cf 100644 --- a/website/source/docs/provisioners/puppet-server.html.markdown +++ b/website/source/docs/provisioners/puppet-server.html.markdown @@ -51,7 +51,7 @@ required. They are listed below: * `facter` (object of key/value strings) - Additional Facter facts to make available to the Puppet run. -* `ignore_exit_codes` (bool) - If true, Packer will never consider the +* `ignore_exit_codes` (boolean) - If true, Packer will never consider the provisioner a failure. 
* `options` (string) - Additional command line options to pass diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 8a450ac50..1169ea29b 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -67,7 +67,7 @@ The SSH communicator has the following options: * `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authentiate with SSH. - * `ssh_pty` (bool) - If true, a PTY will be requested for the SSH connection. + * `ssh_pty` (boolean) - If true, a PTY will be requested for the SSH connection. This defaults to false. * `ssh_timeout` (string) - The time to wait for SSH to become available. @@ -77,7 +77,7 @@ The SSH communicator has the following options: * `ssh_handshake_attempts` (int) - The number of handshakes to attempt with SSH once it can connect. This defaults to 10. - * `ssh_disable_agent` (bool) - If true, SSH agent forwarding will be disabled. + * `ssh_disable_agent` (boolean) - If true, SSH agent forwarding will be disabled. * `ssh_bastion_host` (string) - A bastion host to use for the actual SSH connection. diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index 691d4f34d..4bb5df378 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -59,7 +59,7 @@ each category, the available configuration keys are alphabetized. * `token` (string) - An access token to use to authenticate to the build service. -* `vcs` (bool) - If true, Packer will detect your VCS (if there is one) +* `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and only upload the files that are tracked by the VCS. This is useful for automatically excluding ignored files. This defaults to false. From 132b289b0e58514cf9526314a932a0002ca4cb09 Mon Sep 17 00:00:00 2001 From: Walter Dolce Date: Sat, 11 Jul 2015 19:45:34 +0100 Subject: [PATCH 621/956] Fix character being displayed incorrectly --- website/source/intro/hashicorp-ecosystem.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown index d88e492ba..cf98742d4 100644 --- a/website/source/intro/hashicorp-ecosystem.html.markdown +++ b/website/source/intro/hashicorp-ecosystem.html.markdown @@ -12,7 +12,7 @@ description: |- HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem). -If you are using Packer to build machine images and deployable artifacts, it’s likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure. +If you are using Packer to build machine images and deployable artifacts, it's likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure. 
Below are summaries of HashiCorp’s open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow. From d99a1fab86abc29bf20b527882ebc7f034af731b Mon Sep 17 00:00:00 2001 From: Walter Dolce Date: Sun, 12 Jul 2015 00:11:59 +0100 Subject: [PATCH 622/956] Fix other characters in docs displayed incorrectly --- website/source/intro/hashicorp-ecosystem.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown index cf98742d4..37c26b9ad 100644 --- a/website/source/intro/hashicorp-ecosystem.html.markdown +++ b/website/source/intro/hashicorp-ecosystem.html.markdown @@ -14,7 +14,7 @@ HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, If you are using Packer to build machine images and deployable artifacts, it's likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure. -Below are summaries of HashiCorp’s open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow. +Below are summaries of HashiCorp's open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow. # HashiCorp Ecosystem ![Atlas Workflow](docs/atlas-workflow.png) @@ -27,6 +27,6 @@ Below are summaries of HashiCorp’s open source projects and a graphic showing [Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime. -[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf’s gossip protocol as the foundation for service discovery. +[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf's gossip protocol as the foundation for service discovery. [Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production. 
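The handshake fix introduced in PATCH 602 above, and tightened in PATCH 603 and PATCH 606, is built on a standard Go idiom: run the blocking call in a goroutine that signals a channel, then `select` between that channel and `time.After`. Below is a minimal, self-contained sketch of the idiom; the `withTimeout` and `ErrTimeout` names are illustrative only and do not exist in Packer.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// ErrTimeout plays the role of ErrHandshakeTimeout in the patches above.
var ErrTimeout = errors.New("timeout during handshake")

// withTimeout runs fn in a goroutine and waits for whichever comes first:
// fn finishing or the deadline passing. Closing done (rather than sending
// on it) makes completion observable no matter when the select runs.
func withTimeout(d time.Duration, fn func() error) error {
	if d == 0 {
		d = 1 * time.Minute // zero-value fallback, as in the patch
	}

	var err error
	done := make(chan struct{})
	go func() {
		err = fn()
		close(done)
	}()

	select {
	case <-done:
		return err
	case <-time.After(d):
		// fn is still running here; any resources it holds must be cleaned
		// up by the caller, which is what PATCH 603 does with the TCP and
		// SSH connections.
		return ErrTimeout
	}
}

func main() {
	// A "handshake" that deliberately outlives a 50ms deadline.
	err := withTimeout(50*time.Millisecond, func() error {
		time.Sleep(5 * time.Second)
		return nil
	})
	fmt.Println(err) // timeout during handshake
}
```

Note that the losing goroutine is not killed when the deadline fires, so the pattern only works cleanly when the caller can close the underlying resource (or tolerate the leak until the process exits); that is exactly the scenario the mock broken server in PATCH 602's test exercises.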
From 0c0ff5a25bed586adaf7a8a0632b205e189b66c8 Mon Sep 17 00:00:00 2001 From: Walter Dolce Date: Sun, 12 Jul 2015 14:50:04 +0100 Subject: [PATCH 623/956] Remove third 's' from word --- website/source/docs/templates/communicator.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 8a450ac50..c944396ee 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -28,7 +28,7 @@ use. For example, the Docker builder has a "docker" communicator that uses ## Using a Communicator By default, the SSH communicator is usually used. Additional configuration -may not even be necesssary, since some builders such as Amazon automatically +may not even be necessary, since some builders such as Amazon automatically configure everything. However, to specify a communicator, you set the `communicator` key within From 1de5171857254eedf8d9057d56281cde5db45077 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Mon, 13 Jul 2015 12:59:42 -0400 Subject: [PATCH 624/956] Update middleman-hashicorp --- website/Gemfile.lock | 63 ++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 625e13326..9477360f6 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,6 +1,6 @@ GIT remote: git://github.com/hashicorp/middleman-hashicorp.git - revision: 7796ba44d303ac8e1b566e855e2766e6d0f695fc + revision: 76f0f284ad44cea0457484ea83467192f02daf87 specs: middleman-hashicorp (0.1.0) bootstrap-sass (~> 3.3) @@ -11,6 +11,7 @@ GIT middleman-minify-html (~> 3.4) middleman-syntax (~> 2.0) rack-contrib (~> 1.2) + rack-protection (~> 1.5) rack-rewrite (~> 1.5) rack-ssl-enforcer (~> 0.2) redcarpet (~> 3.2) @@ -20,18 +21,18 @@ GIT GEM remote: https://rubygems.org/ specs: - activesupport (4.1.10) + activesupport (4.1.12) i18n (~> 0.6, >= 0.6.9) json (~> 1.7, >= 1.7.7) minitest (~> 5.1) thread_safe (~> 0.1) tzinfo (~> 1.1) - autoprefixer-rails (5.1.11) + autoprefixer-rails (5.2.1) execjs json - bootstrap-sass (3.3.4.1) + bootstrap-sass (3.3.5.1) autoprefixer-rails (>= 5.0.0.1) - sass (>= 3.2.19) + sass (>= 3.3.0) builder (3.2.2) celluloid (0.16.0) timers (~> 4.0.0) @@ -53,44 +54,45 @@ GEM sass (>= 3.3.0, < 3.5) compass-import-once (1.0.5) sass (>= 3.2, < 3.5) - daemons (1.2.2) + daemons (1.2.3) em-websocket (0.5.1) eventmachine (>= 0.12.9) http_parser.rb (~> 0.6.0) erubis (2.7.0) eventmachine (1.0.7) execjs (2.5.2) - ffi (1.9.8) + ffi (1.9.10) + git-version-bump (0.15.1) haml (4.0.6) tilt hike (1.2.3) hitimes (1.2.2) hooks (0.4.0) uber (~> 0.0.4) - htmlcompressor (0.1.2) + htmlcompressor (0.2.0) http_parser.rb (0.6.0) i18n (0.7.0) - json (1.8.2) - kramdown (1.7.0) + json (1.8.3) + kramdown (1.8.0) less (2.6.0) commonjs (~> 0.2.7) - libv8 (3.16.14.7) - listen (2.10.0) + libv8 (3.16.14.11) + listen (2.10.1) celluloid (~> 0.16.0) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.12) + middleman (3.3.13) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.12) + middleman-core (= 3.3.13) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.12) + middleman-core (3.3.13) activesupport (~> 4.1.0) bundler (~> 1.1) erubis @@ -106,8 +108,8 @@ GEM em-websocket (~> 0.5.1) middleman-core (>= 3.3) 
rack-livereload (~> 0.3.15) - middleman-minify-html (3.4.0) - htmlcompressor (~> 0.1.0) + middleman-minify-html (3.4.1) + htmlcompressor (~> 0.2.0) middleman-core (>= 3.2) middleman-sprockets (3.4.2) middleman-core (>= 3.3) @@ -117,31 +119,34 @@ GEM middleman-syntax (2.0.0) middleman-core (~> 3.2) rouge (~> 1.0) - minitest (5.6.1) - multi_json (1.11.0) + minitest (5.7.0) + multi_json (1.11.2) padrino-helpers (0.12.5) i18n (~> 0.6, >= 0.6.7) padrino-support (= 0.12.5) tilt (~> 1.4.1) padrino-support (0.12.5) activesupport (>= 3.1) - rack (1.6.1) - rack-contrib (1.2.0) - rack (>= 0.9.1) - rack-livereload (0.3.15) + rack (1.6.4) + rack-contrib (1.3.0) + git-version-bump (~> 0.15) + rack (~> 1.4) + rack-livereload (0.3.16) + rack + rack-protection (1.5.3) rack rack-rewrite (1.5.1) rack-ssl-enforcer (0.2.8) rack-test (0.6.3) rack (>= 1.0) - rb-fsevent (0.9.4) + rb-fsevent (0.9.5) rb-inotify (0.9.5) ffi (>= 0.5.0) - redcarpet (3.2.3) - ref (1.0.5) - rouge (1.8.0) - sass (3.4.13) - sprockets (2.12.3) + redcarpet (3.3.2) + ref (2.0.0) + rouge (1.9.1) + sass (3.4.16) + sprockets (2.12.4) hike (~> 1.2) multi_json (~> 1.0) rack (~> 1.0) From ff2e07771ec3b0c306f198ff3114b45b2fc76d35 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Mon, 13 Jul 2015 12:59:48 -0400 Subject: [PATCH 625/956] Use Rack::Protection --- website/config.ru | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/website/config.ru b/website/config.ru index fd8f01d6c..b1a3c74b2 100644 --- a/website/config.ru +++ b/website/config.ru @@ -3,6 +3,17 @@ require "rack/contrib/not_found" require "rack/contrib/response_headers" require "rack/contrib/static_cache" require "rack/contrib/try_static" +require "rack/protection" + +# Protect against various bad things +use Rack::Protection::JsonCsrf +use Rack::Protection::RemoteReferrer +use Rack::Protection::HttpOrigin +use Rack::Protection::EscapedParams +use Rack::Protection::XSSHeader +use Rack::Protection::FrameOptions +use Rack::Protection::PathTraversal +use Rack::Protection::IPSpoofing # Properly compress the output if the client can handle it. 
use Rack::Deflater From 034040d2a7d1fa6a40d083400bbe3b0a1d280abc Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Mon, 13 Jul 2015 13:25:06 -0400 Subject: [PATCH 626/956] Weird bundler --- website/Gemfile.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 9477360f6..216114847 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -81,18 +81,18 @@ GEM celluloid (~> 0.16.0) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.13) + middleman (3.3.12) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.13) + middleman-core (= 3.3.12) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.13) + middleman-core (3.3.12) activesupport (~> 4.1.0) bundler (~> 1.1) erubis From 7e64e906705a541e3d647c44a5fc4a12ce01ad6f Mon Sep 17 00:00:00 2001 From: Lorenzo Villani Date: Mon, 13 Jul 2015 19:21:59 +0200 Subject: [PATCH 627/956] Use --portcount on VirtualBox 5.x --- builder/virtualbox/common/driver_4_2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/virtualbox/common/driver_4_2.go b/builder/virtualbox/common/driver_4_2.go index c375fba32..38c978f1d 100644 --- a/builder/virtualbox/common/driver_4_2.go +++ b/builder/virtualbox/common/driver_4_2.go @@ -22,7 +22,7 @@ func (d *VBox42Driver) CreateSATAController(vmName string, name string) error { } portCountArg := "--sataportcount" - if strings.HasPrefix(version, "4.3") { + if strings.HasPrefix(version, "4.3") || strings.HasPrefix(version, "5.") { portCountArg = "--portcount" } From 28a13111b8f7d6746e95c830e16f6e09cbd73c76 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 13 Jul 2015 14:57:35 -0700 Subject: [PATCH 628/956] Add stub for validate test --- command/test-fixtures/validate/template.json | 10 ++++++ command/validate_test.go | 32 ++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 command/test-fixtures/validate/template.json create mode 100644 command/validate_test.go diff --git a/command/test-fixtures/validate/template.json b/command/test-fixtures/validate/template.json new file mode 100644 index 000000000..1d9e251e5 --- /dev/null +++ b/command/test-fixtures/validate/template.json @@ -0,0 +1,10 @@ +{ + "builders":[ + { + "type":"file", + "target":"chocolate.txt", + "content":"chocolate" + } + ], + "min_packer_version":"0.8.0" +} diff --git a/command/validate_test.go b/command/validate_test.go new file mode 100644 index 000000000..479181fdc --- /dev/null +++ b/command/validate_test.go @@ -0,0 +1,32 @@ +package command + +import ( + "path/filepath" + "testing" +) + +func TestValidateCommand(t *testing.T) { + c := &ValidateCommand{ + Meta: testMetaFile(t), + } + + args := []string{ + filepath.Join(testFixture("validate"), "template.json"), + } + + defer cleanup() + + if code := c.Run(args); code != 0 { + fatalCommand(t, c.Meta) + } + + if !fileExists("chocolate.txt") { + t.Error("Expected to find chocolate.txt") + } + if !fileExists("vanilla.txt") { + t.Error("Expected to find vanilla.txt") + } + if fileExists("cherry.txt") { + t.Error("Expected NOT to find cherry.txt") + } +} From da20c3645479c45d4720490f2fb9c40aff397986 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 13 Jul 2015 18:04:50 -0700 Subject: [PATCH 629/956] Removed unused variable breaking gorename --- command/push_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/command/push_test.go 
b/command/push_test.go index f1b7fd306..9d7b79be7 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -122,10 +122,8 @@ func TestPush_noName(t *testing.T) { func TestPush_cliName(t *testing.T) { var actual []string - var actualOpts *uploadOpts uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) { actual = testArchive(t, r) - actualOpts = opts doneCh := make(chan struct{}) close(doneCh) From 78174dae4eb01047ccb025e361e29b6fcb188bb9 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 13 Jul 2015 19:03:36 -0700 Subject: [PATCH 630/956] Don't override packer's build version from a config file; fixes #2385 --- command/meta.go | 1 - 1 file changed, 1 deletion(-) diff --git a/command/meta.go b/command/meta.go index 1c0864f92..d22efcaba 100644 --- a/command/meta.go +++ b/command/meta.go @@ -43,7 +43,6 @@ func (m *Meta) Core(tpl *template.Template) (*packer.Core, error) { config := *m.CoreConfig config.Template = tpl config.Variables = m.flagVars - config.Version = m.Version // Init the core core, err := packer.NewCore(&config) From b3eacc5c2b460e985ff8da86b995171a6df6db94 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 13 Jul 2015 19:32:28 -0700 Subject: [PATCH 631/956] Updated test to verify expected behavior --- command/command_test.go | 7 ++++ command/test-fixtures/validate/template.json | 2 +- command/validate_test.go | 40 +++++++++++++------- packer/core.go | 3 +- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/command/command_test.go b/command/command_test.go index 126897810..4ba4b8787 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -20,6 +20,13 @@ func fatalCommand(t *testing.T, m Meta) { err.String()) } +func outputCommand(t *testing.T, m Meta) (string, string) { + ui := m.Ui.(*packer.BasicUi) + out := ui.Writer.(*bytes.Buffer) + err := ui.ErrorWriter.(*bytes.Buffer) + return out.String(), err.String() +} + func testFixture(n string) string { return filepath.Join(fixturesDir, n) } diff --git a/command/test-fixtures/validate/template.json b/command/test-fixtures/validate/template.json index 1d9e251e5..75b3f6251 100644 --- a/command/test-fixtures/validate/template.json +++ b/command/test-fixtures/validate/template.json @@ -6,5 +6,5 @@ "content":"chocolate" } ], - "min_packer_version":"0.8.0" + "min_packer_version":"101.0.0" } diff --git a/command/validate_test.go b/command/validate_test.go index 479181fdc..b15e5f155 100644 --- a/command/validate_test.go +++ b/command/validate_test.go @@ -5,28 +5,40 @@ import ( "testing" ) -func TestValidateCommand(t *testing.T) { +func TestValidateCommandOKVersion(t *testing.T) { c := &ValidateCommand{ Meta: testMetaFile(t), } - args := []string{ filepath.Join(testFixture("validate"), "template.json"), } - defer cleanup() - + // This should pass with a valid configuration version + c.CoreConfig.Version = "102.0.0" if code := c.Run(args); code != 0 { fatalCommand(t, c.Meta) } - - if !fileExists("chocolate.txt") { - t.Error("Expected to find chocolate.txt") - } - if !fileExists("vanilla.txt") { - t.Error("Expected to find vanilla.txt") - } - if fileExists("cherry.txt") { - t.Error("Expected NOT to find cherry.txt") - } +} + +func TestValidateCommandBadVersion(t *testing.T) { + c := &ValidateCommand{ + Meta: testMetaFile(t), + } + args := []string{ + filepath.Join(testFixture("validate"), "template.json"), + } + + // This should fail with an invalid configuration version + c.CoreConfig.Version = "100.0.0" + if code := c.Run(args); code != 1 { + 
t.Errorf("Expected exit code 1") + } + + stdout, stderr := outputCommand(t, c.Meta) + expected := `Error initializing core: This template requires Packer version 101.0.0 or higher; using 100.0.0 +` + if stderr != expected { + t.Fatalf("Expected:\n%s\nFound:\n%s\n", expected, stderr) + } + t.Log(stdout) } diff --git a/packer/core.go b/packer/core.go index f9bf87b9d..496fce6bd 100644 --- a/packer/core.go +++ b/packer/core.go @@ -246,8 +246,7 @@ func (c *Core) validate() error { if versionActual.LessThan(versionMin) { return fmt.Errorf( - "This template requires a minimum Packer version of %s,\n"+ - "but version %s is running.", + "This template requires Packer version %s or higher; using %s", versionMin, versionActual) } From a3bf467378f60e363bb5621496d46afd0bdc64a5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 14 Jul 2015 08:34:56 -0500 Subject: [PATCH 632/956] Update "getting started" link to Atlas The current link 404's, but this fixes it. Thanks to copyedt on Twitter: https://twitter.com/copyedt/status/620807196259958785 --- website/source/intro/getting-started/next.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/next.html.markdown b/website/source/intro/getting-started/next.html.markdown index 62480823a..262b84bb9 100644 --- a/website/source/intro/getting-started/next.html.markdown +++ b/website/source/intro/getting-started/next.html.markdown @@ -17,7 +17,7 @@ the [documentation](/docs). The documentation is less of a guide and more of a reference of all the overall features and options of Packer. If you're interested in learning more about how Packer fits into the -HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/getting-started/getting-started-overview). +HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/intro/getting-started). As you use Packer more, please voice your comments and concerns on the [mailing list or IRC](/community). 
Additionally, Packer is From 8495a8c919fb1118c3ea5f4e29b18b774e119c40 Mon Sep 17 00:00:00 2001 From: Gleb M Borisov Date: Wed, 15 Jul 2015 02:11:46 +0300 Subject: [PATCH 633/956] Fix handling IPv6 when ssh_interface set (openstack builder) --- builder/openstack/ssh.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 3e7350d11..3e1c8c20f 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -101,11 +101,15 @@ func sshAddrFromPool(s *servers.Server, desired string) string { if address["OS-EXT-IPS:type"] == "floating" { addr = address["addr"].(string) } else { - if address["version"].(float64) == 4 { + if address["version"].(float64) == 6 { + addr = fmt.Sprintf("[%s]", address["addr"].(string)) + } else { addr = address["addr"].(string) } } + if addr != "" { + log.Printf("[DEBUG] Detected address: %s", addr) return addr } } From 6343d8f16a28a50456a7131f29f8dc3694773098 Mon Sep 17 00:00:00 2001 From: Chris Becker Date: Tue, 14 Jul 2015 11:43:55 -0400 Subject: [PATCH 634/956] Update `inventory_groups` documentation for clarity --- .../provisioners/ansible-local.html.markdown | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index 0a12dbc5b..7d8f18b8f 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -41,15 +41,16 @@ Optional: * `extra_arguments` (array of strings) - An array of extra arguments to pass to the ansible command. By default, this is empty. -* `inventory_groups` (string) - You can let Packer generate a temporary inventory - for you. It will contains only `127.0.0.1`. Thanks to `inventory_groups`, - packer will set the current machine into different groups and will - generate an inventory like: +* `inventory_groups` (string) - A comma-separated list of groups to which + packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` + will generate an Ansible inventory like: - [my_group_1] - 127.0.0.1 - [my_group_2] - 127.0.0.1 +```text +[my_group_1] +127.0.0.1 +[my_group_2] +127.0.0.1 +``` * `inventory_file` (string) - The inventory file to be used by ansible. This file must exist on your local system and will be uploaded to the From 985cd9495221d344f545c372bc3eb1c76df87c5f Mon Sep 17 00:00:00 2001 From: Gleb M Borisov Date: Wed, 15 Jul 2015 13:54:41 +0300 Subject: [PATCH 635/956] Fix go version in docs --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index de8b8fb60..174f5f642 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,8 +53,8 @@ it raises the chances we can quickly merge or address your contributions. If you have never worked with Go before, you will have to complete the following steps in order to be able to compile and test Packer. -1. Install Go. Make sure the Go version is at least Go 1.2. Packer will not work with anything less than - Go 1.2. On a Mac, you can `brew install go` to install Go 1.2. +1. Install Go. Make sure the Go version is at least Go 1.4. Packer will not work with anything less than + Go 1.4. On a Mac, you can `brew install go` to install Go 1.4. 2. Set and export the `GOPATH` environment variable and update your `PATH`. For example, you can add to your `.bash_profile`. 
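A side note on the OpenStack IPv6 fix above: it hand-rolls the bracketing of v6 addresses when building an SSH endpoint. Go's standard library provides the same behavior via `net.JoinHostPort`, which brackets any host containing a colon. A minimal, self-contained sketch for reference, not the builder's actual code:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// JoinHostPort brackets IPv6 literals automatically:
	// "fe80::1" + "22" -> "[fe80::1]:22", while IPv4 stays bare.
	for _, host := range []string{"172.16.0.2", "fe80::1"} {
		fmt.Println(net.JoinHostPort(host, "22"))
	}
}
```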
From c614bb703dd753b3e8e694e9a68cd8f8c9a1b4d1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 11:28:43 -0700 Subject: [PATCH 636/956] Bump version to 0.8.2.dev --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index c2f69582d..1462dccd9 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.1" +const Version = "0.8.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From 8e3559c3b12684802a98afa115cc22d5725d4586 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 12:26:19 -0700 Subject: [PATCH 637/956] Guard against uninitialized pointers in io.Copy to fix #2416 --- communicator/winrm/communicator.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index d90cd8450..1efd770e0 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -85,8 +85,13 @@ func (c *Communicator) Start(rc *packer.RemoteCmd) error { func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { defer shell.Close() - go io.Copy(rc.Stdout, cmd.Stdout) - go io.Copy(rc.Stderr, cmd.Stderr) + if rc.Stdout != nil && cmd.Stdout != nil { + go io.Copy(rc.Stdout, cmd.Stdout) + } + + if rc.Stderr != nil && cmd.Stderr != nil { + go io.Copy(rc.Stderr, cmd.Stderr) + } cmd.Wait() From 29e6194e49ea76ae8556231389a8d18822d40948 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 12:29:42 -0700 Subject: [PATCH 638/956] Added a warning log so we can diagnose failure cases --- communicator/winrm/communicator.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 1efd770e0..59034fcf0 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -87,10 +87,14 @@ func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { if rc.Stdout != nil && cmd.Stdout != nil { go io.Copy(rc.Stdout, cmd.Stdout) + } else { + log.Printf("[WARN] Failed to read stdout for command '%s'", rc.Command) } if rc.Stderr != nil && cmd.Stderr != nil { go io.Copy(rc.Stderr, cmd.Stderr) + } else { + log.Printf("[WARN] Failed to read stderr for command '%s'", rc.Command) } cmd.Wait() From f27505626f274f0e445332aefc4b88def101afd1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 13:09:56 -0700 Subject: [PATCH 639/956] Formatting tweak for ansible docs --- .../docs/provisioners/ansible-local.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index 7d8f18b8f..a2550b7bd 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -45,12 +45,12 @@ Optional: packer will assign the host `127.0.0.1`. 
A value of `my_group_1,my_group_2` will generate an Ansible inventory like: -```text -[my_group_1] -127.0.0.1 -[my_group_2] -127.0.0.1 -``` + ```text + [my_group_1] + 127.0.0.1 + [my_group_2] + 127.0.0.1 + ``` * `inventory_file` (string) - The inventory file to be used by ansible. This file must exist on your local system and will be uploaded to the From 224bb78175c4d50051f04ca4dcb922c992ca3ad0 Mon Sep 17 00:00:00 2001 From: Anthony Spring Date: Mon, 29 Jun 2015 17:11:58 -0400 Subject: [PATCH 640/956] Make EBS block device mapping optional for ephemeral virtual names. --- builder/amazon/common/block_device.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 482b876f6..48cc4ef27 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/template/interpolate" + "strings" ) // BlockDevice @@ -47,9 +48,12 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { } mapping := &ec2.BlockDeviceMapping{ - EBS: ebsBlockDevice, - DeviceName: aws.String(blockDevice.DeviceName), - VirtualName: aws.String(blockDevice.VirtualName), + DeviceName: aws.String(blockDevice.DeviceName), + VirtualName: aws.String(blockDevice.VirtualName), + } + + if !strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { + mapping.EBS = ebsBlockDevice } if blockDevice.NoDevice { From ae064207d529b8d8f81a3d121e672632ca88eb23 Mon Sep 17 00:00:00 2001 From: Tommy Ulfsparre Date: Thu, 2 Jul 2015 00:02:31 +0200 Subject: [PATCH 641/956] Add test for ephemeral block device mapping --- builder/amazon/common/block_device_test.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 063c480f1..803bfc996 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -17,7 +17,6 @@ func TestBlockDevice(t *testing.T) { { Config: &BlockDevice{ DeviceName: "/dev/sdb", - VirtualName: "ephemeral0", SnapshotId: "snap-1234", VolumeType: "standard", VolumeSize: 8, @@ -27,7 +26,7 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), - VirtualName: aws.String("ephemeral0"), + VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ SnapshotID: aws.String("snap-1234"), VolumeType: aws.String("standard"), @@ -55,7 +54,6 @@ func TestBlockDevice(t *testing.T) { { Config: &BlockDevice{ DeviceName: "/dev/sdb", - VirtualName: "ephemeral0", VolumeType: "io1", VolumeSize: 8, DeleteOnTermination: true, @@ -64,7 +62,7 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), - VirtualName: aws.String("ephemeral0"), + VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ VolumeType: aws.String("io1"), VolumeSize: aws.Long(8), @@ -73,6 +71,17 @@ func TestBlockDevice(t *testing.T) { }, }, }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + VirtualName: "ephemeral0", + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + VirtualName: aws.String("ephemeral0"), + }, + }, } for _, tc := range cases { From 9365a4317976d5602a627538c3ffeff9fd03a4b6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 17:07:36 -0700 Subject: [PATCH 642/956] Reformat --- 
builder/amazon/common/block_device.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 48cc4ef27..fb14a66ae 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -1,10 +1,11 @@ package common import ( + "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/template/interpolate" - "strings" ) // BlockDevice @@ -48,12 +49,12 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { } mapping := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(blockDevice.DeviceName), - VirtualName: aws.String(blockDevice.VirtualName), + DeviceName: aws.String(blockDevice.DeviceName), + VirtualName: aws.String(blockDevice.VirtualName), } if !strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { - mapping.EBS = ebsBlockDevice + mapping.EBS = ebsBlockDevice } if blockDevice.NoDevice { From c615539929a186875c043df1e4de4d475eef7a8e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 15 Jul 2015 17:17:59 -0700 Subject: [PATCH 643/956] Reformat long lines --- builder/amazon/common/block_device_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 803bfc996..1bdf9cdf7 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -93,11 +93,14 @@ func TestBlockDevice(t *testing.T) { expected := []*ec2.BlockDeviceMapping{tc.Result} got := blockDevices.BuildAMIDevices() if !reflect.DeepEqual(expected, got) { - t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", awsutil.StringValue(expected), awsutil.StringValue(got)) + t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", + awsutil.StringValue(expected), awsutil.StringValue(got)) } if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) { - t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", awsutil.StringValue(expected), awsutil.StringValue(blockDevices.BuildLaunchDevices())) + t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", + awsutil.StringValue(expected), + awsutil.StringValue(blockDevices.BuildLaunchDevices())) } } } From 241903d0b00226f76ef74f04d02e7c1d260d66d1 Mon Sep 17 00:00:00 2001 From: Tommy Ulfsparre Date: Thu, 2 Jul 2015 00:06:24 +0200 Subject: [PATCH 644/956] no point in setting iops on a standard volumes --- builder/amazon/common/block_device_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 1bdf9cdf7..c69ef2efb 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -21,7 +21,6 @@ func TestBlockDevice(t *testing.T) { VolumeType: "standard", VolumeSize: 8, DeleteOnTermination: true, - IOPS: 1000, }, Result: &ec2.BlockDeviceMapping{ From 988cf2fecff95e20933fcd43b3aefb6c6ae14b95 Mon Sep 17 00:00:00 2001 From: Travis Truman Date: Wed, 15 Jul 2015 21:31:13 -0400 Subject: [PATCH 645/956] Fixes #2434 by adding OpenStack Glance metadata support --- builder/openstack/image_config.go | 3 ++- builder/openstack/step_create_image.go | 3 ++- website/source/docs/builders/openstack.html.markdown | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/builder/openstack/image_config.go b/builder/openstack/image_config.go index 124449eab..b52ad2c67 100644 --- 
a/builder/openstack/image_config.go +++ b/builder/openstack/image_config.go @@ -8,7 +8,8 @@ import ( // ImageConfig is for common configuration related to creating Images. type ImageConfig struct { - ImageName string `mapstructure:"image_name"` + ImageName string `mapstructure:"image_name"` + ImageMetadata map[string]string `mapstructure:"metadata"` } func (c *ImageConfig) Prepare(ctx *interpolate.Context) []error { diff --git a/builder/openstack/step_create_image.go b/builder/openstack/step_create_image.go index b777e8b0b..9b6ac0cd8 100644 --- a/builder/openstack/step_create_image.go +++ b/builder/openstack/step_create_image.go @@ -30,7 +30,8 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { // Create the image ui.Say(fmt.Sprintf("Creating the image: %s", config.ImageName)) imageId, err := servers.CreateImage(client, server.ID, servers.CreateImageOpts{ - Name: config.ImageName, + Name: config.ImageName, + Metadata: config.ImageMetadata, }).ExtractImageID() if err != nil { err := fmt.Errorf("Error creating image: %s", err) diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index fec1a85a6..b61e503be 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -96,6 +96,8 @@ can be configured for this builder. * `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for Rackconnect to assign the machine an IP address before connecting via SSH. Defaults to false. +* `metadata` (object of key/value strings) - Glance metadata that will be applied + to the image. ## Basic Example: Rackspace public cloud From e3ab74e09f9f5b73a5efa046b4b8498487a9cdf5 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:06:03 -0400 Subject: [PATCH 646/956] Add Config struct for docker PTY --- builder/docker/communicator.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 6fedf2769..56ea3b1d3 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -24,10 +24,14 @@ type Communicator struct { HostDir string ContainerDir string Version *version.Version - + config *Config lock sync.Mutex } +type Config struct { + // Pty, if true, will request a pty from docker with -t + Pty bool +} func (c *Communicator) Start(remote *packer.RemoteCmd) error { // Create a temporary file to store the output. Because of a bug in // Docker, sometimes all the output doesn't properly show up. 
This @@ -45,7 +49,11 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { var cmd *exec.Cmd if c.canExec() { - cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") + if c.config.Pty { + cmd = exec.Command("docker", "exec", "-i", "-t", c.ContainerId, "/bin/sh") + } else { + cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") + } } else { cmd = exec.Command("docker", "attach", c.ContainerId) } From fb39fa2cc6d3f9ae8ea9e3d205d3beed155d9400 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:13:04 -0400 Subject: [PATCH 647/956] Update step_connect_docker.go --- builder/docker/step_connect_docker.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index 31f2ea2e4..da2be51af 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -7,6 +7,7 @@ import ( type StepConnectDocker struct{} func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(Config) containerId := state.Get("container_id").(string) driver := state.Get("driver").(Driver) tempDir := state.Get("temp_dir").(string) @@ -25,6 +26,7 @@ func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { HostDir: tempDir, ContainerDir: "/packer-files", Version: version, + Config: config, } state.Put("communicator", comm) From 518ad704b7f161535f0a621ffff1229f2acf1152 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 16 Jul 2015 18:18:59 -0700 Subject: [PATCH 648/956] Added notes to warn against manifest_dir and note that manifest_file can be overloaded with a directory --- .../provisioners/puppet-masterless.html.markdown | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 6ba570add..4e7b5d1bc 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -40,9 +40,10 @@ The reference of available configuration options is listed below. Required parameters: -* `manifest_file` (string) - The manifest file for Puppet to use in order - to compile and run a catalog. This file must exist on your local system - and will be uploaded to the remote machine. +* `manifest_file` (string) - This is either a path to a puppet manifest (`.pp` + file) _or_ a directory containing multiple manifests that puppet will apply. + These file(s) must exist on your local system and will be uploaded to the + remote machine. Optional parameters: @@ -64,6 +65,11 @@ Optional parameters: the `manifest_file`. It is a separate directory that will be set as the "manifestdir" setting on Puppet. + ~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. + This option was deprecated in puppet 3.6, and is slated to be removed in + puppet 4.0. If you have multiple manifests you should simply use + `manifest_file` instead. + * `module_paths` (array of strings) - This is an array of paths to module directories on your local filesystem. These will be uploaded to the remote machine. By default, this is empty. 
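To illustrate the `manifest_file` behavior documented above, here is a hypothetical template fragment that points the provisioner at a directory of manifests rather than a single `.pp` file. Packer templates are strict JSON and cannot carry comments, so note here that the `puppet/manifests/` path is invented purely for illustration:

```json
{
  "provisioners": [
    {
      "type": "puppet-masterless",
      "manifest_file": "puppet/manifests/"
    }
  ]
}
```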
From 3a681d0c0c0eb8d4b5c00f87cfc39d1dc9b4ee07 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:22:11 -0400 Subject: [PATCH 649/956] Add Pty Bool --- builder/docker/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/docker/config.go b/builder/docker/config.go index af5f25fec..03b9dbfb0 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -28,7 +28,7 @@ type Config struct { LoginUsername string `mapstructure:"login_username"` LoginPassword string `mapstructure:"login_password"` LoginServer string `mapstructure:"login_server"` - + Pty bool ctx interpolate.Context } From 4da4150abe51b67687e5bdf9b6e5d1797aa60eed Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:37:08 -0400 Subject: [PATCH 650/956] Update communicator.go --- builder/docker/communicator.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 56ea3b1d3..3d495463f 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -28,10 +28,6 @@ type Communicator struct { lock sync.Mutex } -type Config struct { - // Pty, if true, will request a pty from docker with -t - Pty bool -} func (c *Communicator) Start(remote *packer.RemoteCmd) error { // Create a temporary file to store the output. Because of a bug in // Docker, sometimes all the output doesn't properly show up. This From d00271aab382fad79d7a85b9cf3e6bb5a6eeb9b8 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:50:24 -0400 Subject: [PATCH 651/956] Fix Capitilzation --- builder/docker/communicator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 3d495463f..c07cc2f70 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -24,7 +24,7 @@ type Communicator struct { HostDir string ContainerDir string Version *version.Version - config *Config + Config *Config lock sync.Mutex } From b2811a8252a1e69bc064da38811a1cb92771c7a5 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 21:51:13 -0400 Subject: [PATCH 652/956] Update communicator.go --- builder/docker/communicator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index c07cc2f70..63ef4cd5b 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -45,7 +45,7 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error { var cmd *exec.Cmd if c.canExec() { - if c.config.Pty { + if c.Config.Pty { cmd = exec.Command("docker", "exec", "-i", "-t", c.ContainerId, "/bin/sh") } else { cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh") From bf0c326cd5f88111f9b1b7736ccf469ccb46ebe0 Mon Sep 17 00:00:00 2001 From: georgevicbell Date: Thu, 16 Jul 2015 22:07:39 -0400 Subject: [PATCH 653/956] Update step_connect_docker.go --- builder/docker/step_connect_docker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index da2be51af..315cfc204 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -7,7 +7,7 @@ import ( type StepConnectDocker struct{} func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(Config) + config := state.Get("config").(*Config) containerId := 
state.Get("container_id").(string) driver := state.Get("driver").(Driver) tempDir := state.Get("temp_dir").(string) From 56745e14f585904c340707c8a75657833d178266 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 16 Jul 2015 19:15:16 -0700 Subject: [PATCH 654/956] manifest_file can now be a folder or file.pp and we will upload it correctly in either case --- provisioner/puppet-masterless/provisioner.go | 49 +++++++++++++------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 177cae23c..546224a54 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -270,28 +270,43 @@ func (p *Provisioner) uploadManifests(ui packer.Ui, comm packer.Communicator) (s return "", fmt.Errorf("Error creating manifests directory: %s", err) } - // Upload the main manifest - f, err := os.Open(p.config.ManifestFile) + // NOTE! manifest_file may either be a directory or a file, as puppet apply + // now accepts either one. + + fi, err := os.Stat(p.config.ManifestFile) if err != nil { - return "", err - } - defer f.Close() - - manifestFilename := p.config.ManifestFile - if fi, err := os.Stat(p.config.ManifestFile); err != nil { return "", fmt.Errorf("Error inspecting manifest file: %s", err) - } else if !fi.IsDir() { - manifestFilename = filepath.Base(manifestFilename) + } + + if fi.IsDir() { + // If manifest_file is a directory we'll upload the whole thing + ui.Message(fmt.Sprintf( + "Uploading manifest directory from: %s", p.config.ManifestFile)) + + remoteManifestDir := fmt.Sprintf("%s/manifests", p.config.StagingDir) + err := p.uploadDirectory(ui, comm, remoteManifestDir, p.config.ManifestFile) + if err != nil { + return "", fmt.Errorf("Error uploading manifest dir: %s", err) + } + return remoteManifestDir, nil } else { - ui.Say("WARNING: manifest_file should be a file. 
Use manifest_dir for directories") - } + // Otherwise manifest_file is a file and we'll upload it + ui.Message(fmt.Sprintf( + "Uploading manifest file from: %s", p.config.ManifestFile)) - remoteManifestFile := fmt.Sprintf("%s/%s", remoteManifestsPath, manifestFilename) - if err := comm.Upload(remoteManifestFile, f, nil); err != nil { - return "", err - } + f, err := os.Open(p.config.ManifestFile) + if err != nil { + return "", err + } + defer f.Close() - return remoteManifestFile, nil + manifestFilename := filepath.Base(p.config.ManifestFile) + remoteManifestFile := fmt.Sprintf("%s/%s", remoteManifestsPath, manifestFilename) + if err := comm.Upload(remoteManifestFile, f, nil); err != nil { + return "", err + } + return remoteManifestFile, nil + } } func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error { From f3f3d1fe738bd7a5f2ed119614fc284d8860e131 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 16 Jul 2015 19:34:36 -0700 Subject: [PATCH 655/956] Formatting --- builder/docker/config.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/docker/config.go b/builder/docker/config.go index 03b9dbfb0..36322080c 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -28,7 +28,8 @@ type Config struct { LoginUsername string `mapstructure:"login_username"` LoginPassword string `mapstructure:"login_password"` LoginServer string `mapstructure:"login_server"` - Pty bool + Pty bool + ctx interpolate.Context } From d8cde46e763f4680d55a2dc25d7e8cd276ecf78b Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Fri, 17 Jul 2015 12:10:22 -0400 Subject: [PATCH 656/956] Add missing default values for 'disk_cache' and 'disk_discard' in qemu builder docs, sort qemu_binary before qemuargs so it matches ordering from 'packer fix' --- website/source/docs/builders/qemu.html.markdown | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index ae5d4464f..294d5d563 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -117,10 +117,10 @@ can be configured for this builder. * `disk_cache` (string) - The cache mode to use for disk. Allowed values values include any of "writethrough", "writeback", "none", "unsafe" or - "directsync". + "directsync". By default, this is set to "writeback". * `disk_discard` (string) - The discard mode to use for disk. Allowed values - include any of "unmap" or "ignore". + include any of "unmap" or "ignore". By default, this is set to "ignore". * `disk_image` (boolean) - Packer defaults to building from an ISO file, this parameter controls whether the ISO URL supplied is actually a bootable @@ -148,7 +148,7 @@ can be configured for this builder. * `format` (string) - Either "qcow2" or "raw", this specifies the output format of the virtual machine image. This defaults to "qcow2". -* `headless` (boolean) - Packer defaults to building virtual machines by +* `headless` (boolean) - Packer defaults to building QEMU virtual machines by launching a GUI that shows the console of the machine being built. When this value is set to true, the machine will start without a console. @@ -188,6 +188,11 @@ can be configured for this builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. +* `qemu_binary` (string) - The name of the Qemu binary to look for. 
This + defaults to "qemu-system-x86_64", but may need to be changed for some + platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better + choice for some systems. + * `qemuargs` (array of array of strings) - Allows complete control over the qemu command line (though not, at this time, qemu-img). Each array of strings makes up a command line switch that overrides matching default @@ -225,11 +230,6 @@ qemu-system-x86 command. The arguments are all printed for review. qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0" -* `qemu_binary` (string) - The name of the Qemu binary to look for. This - defaults to "qemu-system-x86_64", but may need to be changed for some - platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better - choice for some systems. - * `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine. From bb6db46962a359ebf134f82881793b26a14a480c Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Fri, 17 Jul 2015 12:20:57 -0400 Subject: [PATCH 657/956] Cull duplicate word 'values' for disk_cache description --- website/source/docs/builders/qemu.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 294d5d563..5516d1f2e 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -116,7 +116,7 @@ can be configured for this builder. the default is 10 seconds. * `disk_cache` (string) - The cache mode to use for disk. Allowed values - values include any of "writethrough", "writeback", "none", "unsafe" or + include any of "writethrough", "writeback", "none", "unsafe" or "directsync". By default, this is set to "writeback". * `disk_discard` (string) - The discard mode to use for disk. Allowed values From 37c20e2bf056fb221d0a0bee3093bb0990ed79fc Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 17 Jul 2015 12:06:20 -0700 Subject: [PATCH 658/956] Added links to puppet docs to clarify behavior of manifest with multiple files --- .../provisioners/puppet-masterless.html.markdown | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 4e7b5d1bc..8fd05e4f2 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -41,9 +41,11 @@ The reference of available configuration options is listed below. Required parameters: * `manifest_file` (string) - This is either a path to a puppet manifest (`.pp` - file) _or_ a directory containing multiple manifests that puppet will apply. - These file(s) must exist on your local system and will be uploaded to the - remote machine. + file) _or_ a directory containing multiple manifests that puppet will apply + (the ["main manifest"][1]). These file(s) must exist on your local system and + will be uploaded to the remote machine. + + [1]: https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html Optional parameters: @@ -66,9 +68,8 @@ Optional parameters: the "manifestdir" setting on Puppet. 
~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. - This option was deprecated in puppet 3.6, and is slated to be removed in - puppet 4.0. If you have multiple manifests you should simply use - `manifest_file` instead. + This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you + have multiple manifests you should use `manifest_file` instead. * `module_paths` (array of strings) - This is an array of paths to module directories on your local filesystem. These will be uploaded to the remote From 9c72861438a96d2d1a141dcf23486730ac7c2ebf Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 17 Jul 2015 12:45:15 -0700 Subject: [PATCH 659/956] Prep release for 0.8.2 --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 1462dccd9..b858802e6 100644 --- a/version.go +++ b/version.go @@ -9,4 +9,4 @@ const Version = "0.8.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From 28c80a648c7e35c320530561a00c889837bd6b22 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 17 Jul 2015 14:02:22 -0700 Subject: [PATCH 660/956] Updated changelog for 0.8.2 --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b58cda75..172589cd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +## 0.8.2 (July 17, 2015) + +IMPROVEMENTS: + + * builder/docker: Add option to use a Pty [GH-2425] + +BUG FIXES: + + * core: Fix crash when `min_packer_version` is specified in a template. [GH-2385] + * builder/amazon: Fix EC2 devices being included in EBS mappings [GH-2459] + * builder/googlecompute: Fix default name for GCE images [GH-2400] + * builder/null: Fix error message with missing ssh_host [GH-2407] + * builder/virtualbox: Use --portcount on VirtualBox 5.x [GH-2438] + * provisioner/puppet: Packer now correctly handles a directory for manifest_file [GH-2463] + * provisioner/winrm: Fix potential crash with WinRM [GH-2416] + ## 0.8.1 (July 2, 2015) IMPROVEMENTS: From 3139d3d2c504c37cd26914f030862764ecc2da70 Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Sun, 19 Jul 2015 10:56:00 -0400 Subject: [PATCH 661/956] Fixes #2474: Replace use of 'int' and 'uint' in website docs with 'integer' and wrap long lines --- .../source/docs/builders/qemu.html.markdown | 2 +- .../docs/templates/communicator.html.md | 26 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 5516d1f2e..ce39c53ec 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -239,7 +239,7 @@ qemu-system-x86 command. The arguments are all printed for review. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. -* `ssh_host_port_min` and `ssh_host_port_max` (uint) - The minimum and +* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and maximum port to use for the SSH port on the host machine which is forwarded to the SSH port on the guest machine. 
Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the diff --git a/website/source/docs/templates/communicator.html.md b/website/source/docs/templates/communicator.html.md index 6afb9b0f1..f38815309 100644 --- a/website/source/docs/templates/communicator.html.md +++ b/website/source/docs/templates/communicator.html.md @@ -57,7 +57,7 @@ The SSH communicator has the following options: * `ssh_host` (string) - The address to SSH to. This usually is automatically configured by the builder. - * `ssh_port` (int) - The port to connect to SSH. This defaults to 22. + * `ssh_port` (integer) - The port to connect to SSH. This defaults to 22. * `ssh_username` (string) - The username to connect to SSH with. @@ -67,24 +67,27 @@ The SSH communicator has the following options: * `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authentiate with SSH. - * `ssh_pty` (boolean) - If true, a PTY will be requested for the SSH connection. - This defaults to false. + * `ssh_pty` (boolean) - If true, a PTY will be requested for the SSH + connection. This defaults to false. * `ssh_timeout` (string) - The time to wait for SSH to become available. - Packer uses this to determine when the machine has booted so this is usually - quite long. Example value: "10m" + Packer uses this to determine when the machine has booted so this is + usually quite long. Example value: "10m" - * `ssh_handshake_attempts` (int) - The number of handshakes to attempt with - SSH once it can connect. This defaults to 10. + * `ssh_handshake_attempts` (integer) - The number of handshakes to attempt + with SSH once it can connect. This defaults to 10. - * `ssh_disable_agent` (boolean) - If true, SSH agent forwarding will be disabled. + * `ssh_disable_agent` (boolean) - If true, SSH agent forwarding will be + disabled. * `ssh_bastion_host` (string) - A bastion host to use for the actual SSH connection. - * `ssh_bastion_port` (int) - The port of the bastion host. Defaults to 22. + * `ssh_bastion_port` (integer) - The port of the bastion host. Defaults to + 22. - * `ssh_bastion_username` (string) - The username to connect to the bastion host. + * `ssh_bastion_username` (string) - The username to connect to the bastion + host. * `ssh_bastion_password` (string) - The password to use to authenticate with the bastion host. @@ -98,7 +101,8 @@ The WinRM communicator has the following options. * `winrm_host` (string) - The address for WinRM to connect to. - * `winrm_port` (int) - The WinRM port to connect to. This defaults to 5985. + * `winrm_port` (integer) - The WinRM port to connect to. This defaults to + 5985. * `winrm_username` (string) - The username to use to connect to WinRM. 
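Pulling the retyped options above together, a hypothetical builder fragment exercising several of the SSH communicator settings from the page being edited; every key appears in that page, while the values and the bastion hostname are illustrative only:

```json
{
  "communicator": "ssh",
  "ssh_username": "packer",
  "ssh_pty": true,
  "ssh_timeout": "10m",
  "ssh_handshake_attempts": 10,
  "ssh_bastion_host": "bastion.example.com",
  "ssh_bastion_username": "jump"
}
```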
From 628462b919970494962095097d5e92ae1c5e1f6e Mon Sep 17 00:00:00 2001 From: Yo Takezawa Date: Tue, 21 Jul 2015 14:07:30 +0900 Subject: [PATCH 662/956] Use snapshot size if you don't specify a VolumeSize --- builder/amazon/common/block_device.go | 6 +++++- builder/amazon/common/block_device_test.go | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index fb14a66ae..83b79ac9e 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -32,7 +32,6 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { for _, blockDevice := range b { ebsBlockDevice := &ec2.EBSBlockDevice{ VolumeType: aws.String(blockDevice.VolumeType), - VolumeSize: aws.Long(blockDevice.VolumeSize), DeleteOnTermination: aws.Boolean(blockDevice.DeleteOnTermination), } @@ -48,6 +47,11 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { ebsBlockDevice.Encrypted = aws.Boolean(blockDevice.Encrypted) } + // Use snapshot size if you don't specify a VolumeSize + if blockDevice.VolumeSize != 0 { + ebsBlockDevice.VolumeSize = aws.Long(blockDevice.VolumeSize) + } + mapping := &ec2.BlockDeviceMapping{ DeviceName: aws.String(blockDevice.DeviceName), VirtualName: aws.String(blockDevice.VirtualName), diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index c69ef2efb..89adbc334 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -81,6 +81,22 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String("ephemeral0"), }, }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + VolumeType: "standard", + DeleteOnTermination: true, + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + VirtualName: aws.String(""), + EBS: &ec2.EBSBlockDevice{ + VolumeType: aws.String("standard"), + DeleteOnTermination: aws.Boolean(true), + }, + }, + }, } for _, tc := range cases { From ee1b6a72ea38f05fdbddb90a832f402769d22c32 Mon Sep 17 00:00:00 2001 From: Sam Dunne Date: Tue, 21 Jul 2015 15:54:48 +0100 Subject: [PATCH 663/956] Fixes #2478 --- post-processor/atlas/post-processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index edfc1d7c4..59335086c 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -35,7 +35,7 @@ type Config struct { TypeOverride bool `mapstructure:"artifact_type_override"` Metadata map[string]string - ServerAddr string `mapstructure:"server_address"` + ServerAddr string `mapstructure:"atlas_url"` Token string // This shouldn't ever be set outside of unit tests. 
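The `atlas_url` fix above only touches the `mapstructure` tag, which is what maps template keys onto struct fields when the configuration is decoded. A minimal sketch of the effect, using the same mapstructure library the struct already relies on; the URL is invented:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	// With this tag, templates set the field via "atlas_url"
	// instead of the old "server_address" key.
	ServerAddr string `mapstructure:"atlas_url"`
}

func main() {
	raw := map[string]interface{}{"atlas_url": "https://atlas.example.com"} // invented URL
	var c Config
	if err := mapstructure.Decode(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ServerAddr) // prints https://atlas.example.com
}
```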
From 9007b1cc6762b12a98f39ccdd2ec5460391aeb35 Mon Sep 17 00:00:00 2001 From: Matthew Patton Date: Tue, 21 Jul 2015 17:24:55 -0400 Subject: [PATCH 664/956] Document behavior of AWS {access,secret}_key in relation to credentials file and profile lookup via AWS_PROFILE --- .../docs/builders/amazon-chroot.html.markdown | 14 ++++---------- .../source/docs/builders/amazon-ebs.html.markdown | 8 ++++---- .../docs/builders/amazon-instance.html.markdown | 9 +++++---- website/source/docs/builders/amazon.html.markdown | 4 ++-- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index d6b61ca8b..b3d1644dd 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -58,11 +58,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. + If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -70,11 +68,7 @@ can be configured for this builder. [configuration templates](/docs/templates/configuration-templates.html) for more info) * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`. * `source_ami` (string) - The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 69a9a5c04..fc78901a6 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -38,8 +38,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. 
- If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. + If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -53,8 +54,7 @@ can be configured for this builder. to launch the EC2 instance to create the AMI. * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index fa3c8a190..81e425c9a 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -43,8 +43,9 @@ can be configured for this builder. ### Required: * `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. + If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, + or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. + Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. * `account_id` (string) - Your AWS account ID. This is required for bundling the AMI. This is _not the same_ as the access key. You can find your @@ -65,8 +66,8 @@ can be configured for this builder. This bucket will be created if it doesn't exist. * `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. + Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` + * `source_ami` (string) - The initial AMI used as a base for the newly created machine. 
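The lookup order documented in the hunks above means a shared credentials file plus `AWS_PROFILE` is enough to select keys without putting them in the template. A sketch of the file layout; the profile name is invented and the key values are elided:

```text
# ~/.aws/credentials
[default]
aws_access_key_id = ...
aws_secret_access_key = ...

[packer-build]
aws_access_key_id = ...
aws_secret_access_key = ...
```

With that file in place, running `AWS_PROFILE=packer-build packer build template.json` would select the second entry, and omitting the variable would fall back to `[default]`.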
diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index ad336ad1c..f82457f1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -33,8 +33,8 @@ much easier to use and Amazon generally recommends EBS-backed images nowadays. ## Using an IAM Instance Profile -If AWS keys are not specified in the template, a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file or through environment variables -Packer will use credentials provided by the instance's IAM profile, if it has one. +If AWS keys are not specified in the template, Packer will consult the [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, try the standard AWS environment variables, and then +any IAM role credentials defined by the instance's metadata. The following policy document provides the minimal set permissions necessary for Packer to work: From 985c3c576b5da25ccc4f653fc4936c95ee21fb90 Mon Sep 17 00:00:00 2001 From: Xiol Date: Wed, 22 Jul 2015 10:13:04 +0100 Subject: [PATCH 665/956] Update setup documentation to cover issue #1117 In issue #1117, the packer binary can sometimes conflict with the packer binary supplied with cracklib. This documentation update covers this and provides workarounds for affected users. --- .../intro/getting-started/setup.html.markdown | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index ae14c2748..5e4734e08 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -58,6 +58,34 @@ If you get an error that `packer` could not be found, then your PATH environment variable was not setup properly. Please go back and ensure that your PATH variable contains the directory which has Packer installed. +The `packer` binary may conflict with the cracklib-supplied packer binary +on RPM-based systems like Fedora, RHEL or CentOS. If this happens, running +`packer` will result in no output or something like this: + +```text +$ packer +/usr/share/cracklib/pw_dict.pwd: Permission denied +/usr/share/cracklib/pw_dict: Permission denied +``` + +In this case you may wish to symlink the `packer` binary to `packer.io` +and use that instead. e.g. + +```text +ln -s /usr/local/bin/packer /usr/local/bin/packer.io +``` + +Then replace `packer` with `packer.io` when following the rest of the +documentation. + +Alternatively you could change your `$PATH` so that the right packer +binary is selected first, however this may cause issues when attempting +to change passwords in the future. + +```text +export PATH="/path/to/packer/directory:$PATH" +``` + Otherwise, Packer is installed and you're ready to go! 
## Alternative Installation Methods From feb2db7b01f3ed77c57b465e99460584a8c0d724 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Wed, 22 Jul 2015 14:05:10 -0700 Subject: [PATCH 666/956] website: remove packer push var interpolation This removes a message that is no longer accurate and fixed in https://github.com/mitchellh/packer/issues/1861 --- website/source/docs/templates/push.html.markdown | 3 --- 1 file changed, 3 deletions(-) diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index 4bb5df378..568b45ec1 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -89,6 +89,3 @@ files to include: } } ``` - -~> **Variable interpolation** is not currently possible in Packer push -configurations. This will be fixed in an upcoming release. From 823e9e73fe45f1e3055465fe00dd45b8cc47fc32 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 17:18:08 -0700 Subject: [PATCH 667/956] Docs cleanup - Reformat .html / .erb files - Remove extra in index.html.erb - Add htmlbeautifier gem - Add middleman-breadcrumbs - Add make format (calls htmlbeautifier) --- website/Gemfile | 2 + website/Gemfile.lock | 5 + website/Makefile | 4 + website/README.md | 10 + website/config.rb | 2 + website/source/downloads.html.erb | 84 ++++---- website/source/index.html.erb | 125 +++++------ website/source/layouts/community.erb | 8 +- website/source/layouts/docs.erb | 196 +++++++++--------- .../source/layouts/docs_machine_readable.erb | 27 +-- website/source/layouts/inner.erb | 47 ++--- website/source/layouts/intro.erb | 50 ++--- website/source/layouts/layout.erb | 136 ++++++------ 13 files changed, 358 insertions(+), 338 deletions(-) diff --git a/website/Gemfile b/website/Gemfile index 2b35e2810..14f80e508 100644 --- a/website/Gemfile +++ b/website/Gemfile @@ -3,3 +3,5 @@ source "https://rubygems.org" ruby "2.2.2" gem "middleman-hashicorp", github: "hashicorp/middleman-hashicorp" +gem "middleman-breadcrumbs" +gem "htmlbeautifier" \ No newline at end of file diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 216114847..3895f5bb1 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -69,6 +69,7 @@ GEM hitimes (1.2.2) hooks (0.4.0) uber (~> 0.0.4) + htmlbeautifier (1.1.0) htmlcompressor (0.2.0) http_parser.rb (0.6.0) i18n (0.7.0) @@ -92,6 +93,8 @@ GEM middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) + middleman-breadcrumbs (0.1.0) + middleman (>= 3.3.5) middleman-core (3.3.12) activesupport (~> 4.1.0) bundler (~> 1.1) @@ -179,4 +182,6 @@ PLATFORMS ruby DEPENDENCIES + htmlbeautifier + middleman-breadcrumbs middleman-hashicorp! diff --git a/website/Makefile b/website/Makefile index 9888cfa82..100a4dbf9 100644 --- a/website/Makefile +++ b/website/Makefile @@ -8,3 +8,7 @@ dev: init build: init PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman build + +format: + bundle exec htmlbeautifier -t 2 source/*.erb + bundle exec htmlbeautifier -t 2 source/layouts/*.erb \ No newline at end of file diff --git a/website/README.md b/website/README.md index 881362f5a..e86ccc60e 100644 --- a/website/README.md +++ b/website/README.md @@ -21,3 +21,13 @@ make dev Then open up `localhost:4567`. Note that some URLs you may need to append ".html" to make them work (in the navigation and such). + +## Keeping Tidy + +To keep the source code nicely formatted, there is a `make format` target. 
This +runs `htmlbeautifier` and `pandoc` to reformat the source files consistently. + + make format + +Note that you will need to install pandoc yourself. `make format` will skip it +if you don't have it installed. \ No newline at end of file diff --git a/website/config.rb b/website/config.rb index 9c21ff297..80fc3680b 100644 --- a/website/config.rb +++ b/website/config.rb @@ -4,6 +4,8 @@ set :base_url, "https://www.packer.io/" +activate :breadcrumbs + activate :hashicorp do |h| h.version = ENV["PACKER_VERSION"] h.bintray_enabled = ENV["BINTRAY_ENABLED"] diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb index d10dfb5c6..e8c66f970 100644 --- a/website/source/downloads.html.erb +++ b/website/source/downloads.html.erb @@ -3,47 +3,49 @@ page_title: "Downloads" ---
      -
      -

      Downloads

      - Latest version: <%= latest_version %> -
      +
      +

      Downloads

      + Latest version: <%= latest_version %> +
      -
      -
      -
      -
      -

      - Below are all available downloads for the latest version of Packer - (<%= latest_version %>). Please download the proper package for your - operating system and architecture. You can find SHA256 checksums - for packages here. -

      -
      -
      - <% product_versions.each do |os, versions| %> -
      -
      -
      <%= system_icon(os) %>
      -
      -

      <%= os %>

      - -
      -
      -
      -
      - <% end %> - -
      -
      - - - -
      -
      -
      +
      +
      +
      +

      + Below are all available downloads for the latest version of Packer ( + <%= latest_version %>). Please download the proper package for your operating system and architecture. You can find SHA256 checksums for packages here. +

      +
      +
      + <% product_versions.each do |os, versions| %> +
      +
      +
      + <%= system_icon(os) %> +
      +
      +

      <%= os %>

      + +
      +
      +
      +
      + <% end %> +
      +
      + + + +
      +
      +
      diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 6d38bb645..1658f67a3 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -1,75 +1,58 @@ --- -description: |- - Packer is a free and open source tool for creating golden images for multiple platforms from a single source configuration. +description: Packer is a free and open source tool for creating golden images + for multiple platforms from a single source configuration. ---
      - -
      -
      -
      -
      -

      - Packer is a tool for creating machine and container images for multiple platforms from a single source configuration. -

      -
      -
      -
      -
      - -
      - -
      - - -
      -
      -
      -
      - <%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %> -
      - -
      -

      Modern, Automated

      -

      - Packer is easy to use and automates the creation of any type - of machine image. It embraces modern configuration management by - encouraging you to use automated scripts to install and - configure the software within your Packer-made images. - - Packer brings machine images into the modern age, unlocking - untapped potential and opening new opportunities. -

      -
      -
      -
      -
      - -
      -
      -
      -
      -

      Works Great With

      -

      - Out of the box Packer comes with support to build images for - Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, - VirtualBox, VMware, and more. Support for - more platforms is on the way, and anyone can add new platforms - via plugins. -

      -
      - -
      - <%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %> -
      -
      -
      -
      - - +
      +
      +
      +
      +

      + Packer is a tool for creating machine and container images for multiple platforms from a single source configuration. +

      +
      +
      +
      +
      +
      + +
      +
      +
      +
      +
      + <%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %> +
      +
      +

      Modern, Automated

      +

      + Packer is easy to use and automates the creation of any type of machine image. It embraces modern configuration management by encouraging you to use automated scripts to install and configure the software within your Packer-made images. Packer brings machine images into the modern age, unlocking untapped potential and opening new opportunities. +

      +
      +
      +
      +
      +
      +
      +
      +
      +

      Works Great With

      +

      + Out of the box Packer comes with support to build images for Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, VirtualBox, VMware, and more. Support for more platforms is on the way, and anyone can add new platforms via plugins. +

      +
      +
      + <%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %> +
      +
      +
      +
      + + diff --git a/website/source/layouts/community.erb b/website/source/layouts/community.erb index 12c1cc7bc..53dacbb4e 100644 --- a/website/source/layouts/community.erb +++ b/website/source/layouts/community.erb @@ -1,6 +1,6 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

      - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

      + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index d0c331b1f..2b8bb8810 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -1,97 +1,103 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

      Docs

      - - - - - - - - - - - - - - - - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

      Docs

      + + + + + + + + + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/docs_machine_readable.erb b/website/source/layouts/docs_machine_readable.erb index 4a3cac34d..a19c42258 100644 --- a/website/source/layouts/docs_machine_readable.erb +++ b/website/source/layouts/docs_machine_readable.erb @@ -1,15 +1,16 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

      Docs

      - - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

      Docs

      + + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/inner.erb b/website/source/layouts/inner.erb index c570f73f6..0706d1f9d 100644 --- a/website/source/layouts/inner.erb +++ b/website/source/layouts/inner.erb @@ -1,30 +1,29 @@ <% wrap_layout :layout do %> -
      -
      - -
      -
      - <%= yield %> +
      +
      + - - <% if current_page.data.next_url %> -
      -
      <% end %> diff --git a/website/source/layouts/intro.erb b/website/source/layouts/intro.erb index 127d6ab84..cea9a3403 100644 --- a/website/source/layouts/intro.erb +++ b/website/source/layouts/intro.erb @@ -1,26 +1,28 @@ <% wrap_layout :inner do %> - <% content_for :sidebar do %> -

      Intro

      - - - - - <% end %> - <%= yield %> + <% content_for :sidebar do %> +

      Intro

      + + + <% end %> + <%= yield %> <% end %> diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 26a1dac6b..420883de6 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -1,79 +1,83 @@ - - <%= [current_page.data.page_title, "Packer by HashiCorp"].compact.join(" - ") %> + + + <%= [current_page.data.page_title, "Packer by HashiCorp"].compact.join(" - ") %> + - - <%= stylesheet_link_tag "application" %> - + <%= stylesheet_link_tag "application" %> - - - " type="image/x-icon"> - " type="image/x-icon"> - - - - - - - - <%= yield %> - -
      - - - + " type="image/x-icon"> + " type="image/x-icon"> + + + + + + <%= yield %> +
      + - + From 448fce56c0fd9ba81b32bb714a0b1af6c7754b56 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 18:09:32 -0700 Subject: [PATCH 668/956] Replace tabs with 2 spaces --- .../source/assets/stylesheets/_footer.scss | 60 +++++++++---------- .../source/assets/stylesheets/_helpers.scss | 26 ++++---- website/source/assets/stylesheets/_reset.scss | 12 ++-- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/website/source/assets/stylesheets/_footer.scss b/website/source/assets/stylesheets/_footer.scss index 7f771340f..67594e6fe 100644 --- a/website/source/assets/stylesheets/_footer.scss +++ b/website/source/assets/stylesheets/_footer.scss @@ -12,45 +12,45 @@ footer { margin-left: -20px; } - ul { - margin-top: 40px; - @include respond-to(mobile) { - margin-left: $baseline; - margin-top: $baseline; + ul { + margin-top: 40px; + @include respond-to(mobile) { + margin-left: $baseline; + margin-top: $baseline; } - li { - display: inline; - margin-right: 50px; - @include respond-to(mobile) { - margin-right: 20px; - display: list-item; + li { + display: inline; + margin-right: 50px; + @include respond-to(mobile) { + margin-right: 20px; + display: list-item; } } - .hashi-logo { - background: image-url('logo_footer.png') no-repeat center top; - height: 40px; - width: 40px; - background-size: 37px 40px; - text-indent: -999999px; - display: inline-block; - margin-top: -10px; - margin-right: 0; - @include respond-to(mobile) { - margin-top: -50px; - margin-right: $baseline; - } - } - } + .hashi-logo { + background: image-url('logo_footer.png') no-repeat center top; + height: 40px; + width: 40px; + background-size: 37px 40px; + text-indent: -999999px; + display: inline-block; + margin-top: -10px; + margin-right: 0; + @include respond-to(mobile) { + margin-top: -50px; + margin-right: $baseline; + } + } + } - .active { + .active { color: $green; - } + } - button { + button { margin-top: 20px; - } + } } .page-wrap { diff --git a/website/source/assets/stylesheets/_helpers.scss b/website/source/assets/stylesheets/_helpers.scss index d28b5265c..8c20db3fc 100644 --- a/website/source/assets/stylesheets/_helpers.scss +++ b/website/source/assets/stylesheets/_helpers.scss @@ -70,17 +70,17 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space; background-color: #000; color: $white; - a { + a { color: inherit; &:hover { - color: $green; - } + color: $green; + } - &:active { - color: darken($green, 30%); - } - } + &:active { + color: darken($green, 30%); + } + } } .white-background { @@ -102,9 +102,9 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space; color: $orange; font-size: 20px; - a:hover, a:active, a:visited { + a:hover, a:active, a:visited { color: inherit; - } + } } // media queries @@ -170,13 +170,13 @@ $break-lg: 980px; @mixin transform-scale($value) { -webkit-transform: scale($value); - -moz-transform: scale($value); - transform: scale($value); + -moz-transform: scale($value); + transform: scale($value); } @mixin transition($type, $speed, $easing) { - -webkit-transition: $type $speed $easing; - -moz-transition: $type $speed $easing; + -webkit-transition: $type $speed $easing; + -moz-transition: $type $speed $easing; -o-transition: $type $speed $easing; transition: $type $speed $easing; } diff --git a/website/source/assets/stylesheets/_reset.scss b/website/source/assets/stylesheets/_reset.scss index 4ebb5fd27..5a417ec09 100644 --- a/website/source/assets/stylesheets/_reset.scss +++ b/website/source/assets/stylesheets/_reset.scss @@ -14,10 +14,10 @@ form, 
input, textarea, button { line-height: 1.0; color: inherit; - &:focus { - line-height: 1.0; - box-shadow: none !important; - outline: none; - vertical-align: middle; - } + &:focus { + line-height: 1.0; + box-shadow: none !important; + outline: none; + vertical-align: middle; + } } From b9c9da7157a0bbc2709fc1fdbbe91c4189583d7a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:26:48 -0700 Subject: [PATCH 669/956] Added a static version of this so we can partial it into place in the layout. This prevents it from being reformatted. --- website/source/layouts/google-analytics.html | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 website/source/layouts/google-analytics.html diff --git a/website/source/layouts/google-analytics.html b/website/source/layouts/google-analytics.html new file mode 100644 index 000000000..6cd45279d --- /dev/null +++ b/website/source/layouts/google-analytics.html @@ -0,0 +1,9 @@ + From dd255df412fe8447281dbcd0751f98658a1a2f41 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:28:03 -0700 Subject: [PATCH 670/956] Add pandoc magical markdown reformatter --- website/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/website/Makefile b/website/Makefile index 100a4dbf9..1cc81038c 100644 --- a/website/Makefile +++ b/website/Makefile @@ -11,4 +11,7 @@ build: init format: bundle exec htmlbeautifier -t 2 source/*.erb - bundle exec htmlbeautifier -t 2 source/layouts/*.erb \ No newline at end of file + bundle exec htmlbeautifier -t 2 source/layouts/*.erb + pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true + pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true + From 36052d8c2e19ae162e380e556a45cbf9fe3eb0ca Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:28:35 -0700 Subject: [PATCH 671/956] Add new layout with magic footer link and static google analytics partial --- website/source/layouts/layout.erb | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 420883de6..a5cc83c5b 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -52,9 +52,16 @@
    • A HashiCorp project.
    • -
    • - Edit this page -
    • + <% # current_page.path does not have an extension, but + # current_page.source_file does. Also, we don't want to show + # this on the homepage. + if current_page.url != "/" + current_page_source = current_page.path + \ + current_page.source_file.split(current_page.path)[1] %> +
    • + Edit this page +
    • + <% end %> @@ -63,21 +70,6 @@
      - + <%= partial "layouts/google-analytics.html" %> From 13ac8896a9561be234e310554248ea5f412bbda4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:29:59 -0700 Subject: [PATCH 672/956] Reformat the layout file (again) --- website/source/layouts/layout.erb | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index a5cc83c5b..f66adb067 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -58,18 +58,18 @@ if current_page.url != "/" current_page_source = current_page.path + \ current_page.source_file.split(current_page.path)[1] %> -
    • - Edit this page -
    • - <% end %> - - - -
    - +
  • + Edit this page +
  • + <% end %> + + + + - - <%= partial "layouts/google-analytics.html" %> - + + + <%= partial "layouts/google-analytics.html" %> + From d57c051651d86a2a1900c92ab600c8008f97ca8a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:31:00 -0700 Subject: [PATCH 673/956] Reformat everything --- website/source/community/index.html.markdown | 137 ++--- .../docs/basics/terminology.html.markdown | 87 ++-- .../docs/builders/amazon-chroot.html.markdown | 264 +++++----- .../docs/builders/amazon-ebs.html.markdown | 251 ++++----- .../builders/amazon-instance.html.markdown | 379 +++++++------- .../source/docs/builders/amazon.html.markdown | 51 +- .../source/docs/builders/custom.html.markdown | 18 +- .../docs/builders/digitalocean.html.markdown | 81 +-- .../source/docs/builders/docker.html.markdown | 234 +++++---- .../source/docs/builders/null.html.markdown | 27 +- .../docs/builders/openstack.html.markdown | 139 ++--- .../docs/builders/parallels-iso.html.markdown | 315 ++++++------ .../docs/builders/parallels-pvm.html.markdown | 222 ++++---- .../docs/builders/parallels.html.markdown | 49 +- .../source/docs/builders/qemu.html.markdown | 338 ++++++------- .../builders/virtualbox-iso.html.markdown | 417 +++++++-------- .../builders/virtualbox-ovf.html.markdown | 325 ++++++------ .../docs/builders/virtualbox.html.markdown | 41 +- .../docs/builders/vmware-iso.html.markdown | 477 +++++++++--------- .../docs/builders/vmware-vmx.html.markdown | 180 +++---- .../source/docs/builders/vmware.html.markdown | 41 +- .../docs/command-line/build.html.markdown | 45 +- .../docs/command-line/fix.html.markdown | 47 +- .../docs/command-line/inspect.html.markdown | 42 +- .../command-line/introduction.html.markdown | 33 +- .../machine-readable.html.markdown | 91 ++-- .../docs/command-line/push.html.markdown | 29 +- .../docs/command-line/validate.html.markdown | 26 +- .../source/docs/extend/builder.html.markdown | 197 ++++---- .../source/docs/extend/command.html.markdown | 92 ++-- .../extend/developing-plugins.html.markdown | 166 +++--- .../source/docs/extend/plugins.html.markdown | 91 ++-- .../docs/extend/post-processor.html.markdown | 101 ++-- .../docs/extend/provisioner.html.markdown | 101 ++-- website/source/docs/index.html.markdown | 17 +- .../source/docs/installation.html.markdown | 68 +-- .../command-build.html.markdown | 274 +++++----- .../command-inspect.html.markdown | 100 ++-- .../command-version.html.markdown | 72 +-- .../machine-readable/general.html.markdown | 44 +- .../docs/machine-readable/index.html.markdown | 41 +- .../other/core-configuration.html.markdown | 49 +- .../source/docs/other/debugging.html.markdown | 46 +- .../environmental-variables.html.markdown | 48 +- .../docs/post-processors/atlas.html.markdown | 69 ++- .../post-processors/compress.html.markdown | 45 +- .../docker-import.html.markdown | 44 +- .../post-processors/docker-push.html.markdown | 36 +- .../post-processors/docker-save.html.markdown | 32 +- .../post-processors/docker-tag.html.markdown | 53 +- .../vagrant-cloud.html.markdown | 103 ++-- .../post-processors/vagrant.html.markdown | 133 +++-- .../post-processors/vsphere.html.markdown | 51 +- .../provisioners/ansible-local.html.markdown | 89 ++-- .../provisioners/chef-client.html.markdown | 180 +++---- .../docs/provisioners/chef-solo.html.markdown | 172 ++++--- .../docs/provisioners/custom.html.markdown | 19 +- .../docs/provisioners/file.html.markdown | 68 +-- .../provisioners/powershell.html.markdown | 87 ++-- .../puppet-masterless.html.markdown | 146 +++--- 
.../provisioners/puppet-server.html.markdown | 83 +-- .../salt-masterless.html.markdown | 51 +- .../docs/provisioners/shell.html.markdown | 234 ++++----- .../docs/templates/builders.html.markdown | 76 +-- .../configuration-templates.html.markdown | 194 ++++--- .../docs/templates/introduction.html.markdown | 100 ++-- .../templates/post-processors.html.markdown | 129 ++--- .../docs/templates/provisioners.html.markdown | 114 ++--- .../source/docs/templates/push.html.markdown | 58 +-- .../templates/user-variables.html.markdown | 150 +++--- .../templates/veewee-to-packer.html.markdown | 65 +-- .../getting-started/build-image.html.markdown | 167 +++--- .../intro/getting-started/next.html.markdown | 40 +- .../parallel-builds.html.markdown | 188 +++---- .../getting-started/provision.html.markdown | 118 ++--- .../remote-builds.html.markdown | 98 ++-- .../intro/getting-started/setup.html.markdown | 66 +-- .../getting-started/vagrant.html.markdown | 78 +-- .../intro/hashicorp-ecosystem.html.markdown | 65 ++- website/source/intro/index.html.markdown | 43 +- website/source/intro/platforms.html.markdown | 100 ++-- website/source/intro/use-cases.html.markdown | 67 +-- website/source/intro/why.html.markdown | 64 +-- 83 files changed, 4946 insertions(+), 4622 deletions(-) diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index 1b21e818a..f4069fbdf 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -1,22 +1,25 @@ --- -layout: "community" -page_title: "Community" -description: |- - Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. ---- +description: | + Packer is a new project with a growing community. Despite this, there are + dedicated users willing to help through various mediums. +layout: community +page_title: Community +... # Community Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. -**IRC:** `#packer-tool` on Freenode. +**IRC:** `#packer-tool` on Freenode. -**Mailing List:** [Packer Google Group](http://groups.google.com/group/packer-tool) +**Mailing List:** [Packer Google +Group](http://groups.google.com/group/packer-tool) -**Bug Tracker:** [Issue tracker on GitHub](https://github.com/mitchellh/packer/issues). -Please only use this for reporting bugs. Do not ask for general help here. Use IRC -or the mailing list for that. +**Bug Tracker:** [Issue tracker on +GitHub](https://github.com/mitchellh/packer/issues). Please only use this for +reporting bugs. Do not ask for general help here. Use IRC or the mailing list +for that. ## People @@ -25,62 +28,70 @@ to Packer in some core way. Over time, faces may appear and disappear from this list as contributors come and go.
    -
    - -
    -

    Mitchell Hashimoto (@mitchellh)

    -

    - Mitchell Hashimoto is the creator of Packer. He developed the - core of Packer as well as the Amazon, VirtualBox, and VMware - builders. In addition to Packer, Mitchell is the creator of - Vagrant. He is self - described as "automation obsessed." -

    -
    -
    -
    - -
    -

    Jack Pearkes (@pearkes)

    -

    - Jack Pearkes created and maintains the DigitalOcean builder - for Packer. Outside of Packer, Jack is an avid open source - contributor and software consultant.

    -
    -
    +
    + +
    +

    Mitchell Hashimoto (@mitchellh)

    +

    + Mitchell Hashimoto is the creator of Packer. He developed the + core of Packer as well as the Amazon, VirtualBox, and VMware + builders. In addition to Packer, Mitchell is the creator of + Vagrant. He is self + described as "automation obsessed." +

    +
    +
    -
    - -
    -

    Mark Peek (@markpeek)

    -

    - In addition to Packer, Mark Peek helps maintain - various open source projects such as - cloudtools and - IronPort Python libraries. - Mark is also a FreeBSD committer.

    -
    -
    +
    + +
    +

    Jack Pearkes (@pearkes)

    +

    + Jack Pearkes created and maintains the DigitalOcean builder + for Packer. Outside of Packer, Jack is an avid open source + contributor and software consultant.

    +
    +
    -
    - -
    -

    Ross Smith II (@rasa)

    -

    -Ross Smith maintains our VMware builder on Windows, and provides other valuable assistance. -Ross is an open source enthusiast, published author, and freelance consultant.

    -
    -
    +
    + +
    +

    Mark Peek (@markpeek)

    +

    + In addition to Packer, Mark Peek helps maintain + various open source projects such as + cloudtools and + IronPort Python libraries. + Mark is also a FreeBSD committer.

    +
    +
    -
    - -
    -

    Rickard von Essen
    (@rickard-von-essen)

    -

    -Rickard von Essen maintains our Parallels Desktop builder. Rickard is an polyglot programmer and consults on Continuous Delivery.

    -
    -
    +
    + +
    +

    Ross Smith II (@rasa)

    +

    + +Ross Smith maintains our +VMware builder on Windows, and provides other valuable assistance. Ross is an +open source enthusiast, published author, and freelance consultant. +

    +
    +
    + +
    + +
    +

    Rickard von Essen
    (@rickard-von-essen)

    +

+ +Rickard von Essen maintains our Parallels Desktop builder. Rickard is a +polyglot programmer and consults on Continuous Delivery. +

    +
    +
    + +
    -
    diff --git a/website/source/docs/basics/terminology.html.markdown b/website/source/docs/basics/terminology.html.markdown index 4fce2cc79..800478143 100644 --- a/website/source/docs/basics/terminology.html.markdown +++ b/website/source/docs/basics/terminology.html.markdown @@ -1,54 +1,57 @@ --- -layout: "docs" -page_title: "Packer Terminology" -description: |- - There are a handful of terms used throughout the Packer documentation where the meaning may not be immediately obvious if you haven't used Packer before. Luckily, there are relatively few. This page documents all the terminology required to understand and use Packer. The terminology is in alphabetical order for easy referencing. ---- +description: | + There are a handful of terms used throughout the Packer documentation where the + meaning may not be immediately obvious if you haven't used Packer before. + Luckily, there are relatively few. This page documents all the terminology + required to understand and use Packer. The terminology is in alphabetical order + for easy referencing. +layout: docs +page_title: Packer Terminology +... # Packer Terminology -There are a handful of terms used throughout the Packer documentation where -the meaning may not be immediately obvious if you haven't used Packer before. +There are a handful of terms used throughout the Packer documentation where the +meaning may not be immediately obvious if you haven't used Packer before. Luckily, there are relatively few. This page documents all the terminology -required to understand and use Packer. The terminology is in alphabetical -order for easy referencing. +required to understand and use Packer. The terminology is in alphabetical order +for easy referencing. -- `Artifacts` are the results of a single build, and are usually a set of IDs -or files to represent a machine image. Every builder produces a single -artifact. As an example, in the case of the Amazon EC2 builder, the artifact is -a set of AMI IDs (one per region). For the VMware builder, the artifact is a -directory of files comprising the created virtual machine. +- `Artifacts` are the results of a single build, and are usually a set of IDs or + files to represent a machine image. Every builder produces a single artifact. + As an example, in the case of the Amazon EC2 builder, the artifact is a set of + AMI IDs (one per region). For the VMware builder, the artifact is a directory + of files comprising the created virtual machine. -- `Builds` are a single task that eventually produces an image for a single -platform. Multiple builds run in parallel. Example usage in a -sentence: "The Packer build produced an AMI to run our web application." -Or: "Packer is running the builds now for VMware, AWS, and VirtualBox." +- `Builds` are a single task that eventually produces an image for a + single platform. Multiple builds run in parallel. Example usage in a sentence: + "The Packer build produced an AMI to run our web application." Or: "Packer is + running the builds now for VMware, AWS, and VirtualBox." -- `Builders` are components of Packer that are able to create a machine -image for a single platform. Builders read in some configuration and use -that to run and generate a machine image. A builder is invoked as part of a -build in order to create the actual resulting images. Example builders include -VirtualBox, VMware, and Amazon EC2. Builders can be created and added to -Packer in the form of plugins. 
+- `Builders` are components of Packer that are able to create a machine image + for a single platform. Builders read in some configuration and use that to run + and generate a machine image. A builder is invoked as part of a build in order + to create the actual resulting images. Example builders include VirtualBox, + VMware, and Amazon EC2. Builders can be created and added to Packer in the + form of plugins. -- `Commands` are sub-commands for the `packer` program that perform some -job. An example command is "build", which is invoked as `packer build`. -Packer ships with a set of commands out of the box in order to define -its command-line interface. Commands can also be created and added to -Packer in the form of plugins. +- `Commands` are sub-commands for the `packer` program that perform some job. An + example command is "build", which is invoked as `packer build`. Packer ships + with a set of commands out of the box in order to define its + command-line interface. Commands can also be created and added to Packer in + the form of plugins. -- `Post-processors` are components of Packer that take the result of -a builder or another post-processor and process that to -create a new artifact. Examples of post-processors are -compress to compress artifacts, upload to upload artifacts, etc. +- `Post-processors` are components of Packer that take the result of a builder + or another post-processor and process that to create a new artifact. Examples + of post-processors are compress to compress artifacts, upload to upload + artifacts, etc. -- `Provisioners` are components of Packer that install and configure -software within a running machine prior to that machine being turned -into a static image. They perform the major work of making the image contain -useful software. Example provisioners include shell scripts, Chef, Puppet, -etc. +- `Provisioners` are components of Packer that install and configure software + within a running machine prior to that machine being turned into a + static image. They perform the major work of making the image contain + useful software. Example provisioners include shell scripts, Chef, + Puppet, etc. -- `Templates` are JSON files which define one or more builds -by configuring the various components of Packer. Packer is able to read a -template and use that information to create multiple machine images in -parallel. +- `Templates` are JSON files which define one or more builds by configuring the + various components of Packer. Packer is able to read a template and use that + information to create multiple machine images in parallel. diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index d6b61ca8b..c3e16a982 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -1,49 +1,52 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (chroot)" -description: |- - The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an EBS volume as the root device. For more information on the difference between instance storage and EBS-backed instances, storage for the root device section in the EC2 documentation. ---- +description: | + The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an + EBS volume as the root device. For more information on the difference between + instance storage and EBS-backed instances, storage for the root device section + in the EC2 documentation. 
+layout: docs +page_title: 'Amazon AMI Builder (chroot)' +... # AMI Builder (chroot) Type: `amazon-chroot` -The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by -an EBS volume as the root device. For more information on the difference -between instance storage and EBS-backed instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an +EBS volume as the root device. For more information on the difference between +instance storage and EBS-backed instances, see the ["storage for the root +device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). -The difference between this builder and the `amazon-ebs` builder is that -this builder is able to build an EBS-backed AMI without launching a new -EC2 instance. This can dramatically speed up AMI builds for organizations -who need the extra fast build. +The difference between this builder and the `amazon-ebs` builder is that this +builder is able to build an EBS-backed AMI without launching a new EC2 instance. +This can dramatically speed up AMI builds for organizations who need the extra +fast build. -~> **This is an advanced builder** If you're just getting -started with Packer, we recommend starting with the -[amazon-ebs builder](/docs/builders/amazon-ebs.html), which is -much easier to use. +\~> **This is an advanced builder** If you're just getting started with +Packer, we recommend starting with the [amazon-ebs +builder](/docs/builders/amazon-ebs.html), which is much easier to use. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. ## How Does it Work? -This builder works by creating a new EBS volume from an existing source AMI -and attaching it into an already-running EC2 instance. Once attached, a -[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the -system within that volume. After provisioning, the volume is detached, -snapshotted, and an AMI is made. +This builder works by creating a new EBS volume from an existing source AMI and +attaching it into an already-running EC2 instance. Once attached, a +[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the system +within that volume. After provisioning, the volume is detached, snapshotted, and +an AMI is made. -Using this process, minutes can be shaved off the AMI creation process -because a new EC2 instance doesn't need to be launched. +Using this process, minutes can be shaved off the AMI creation process because a +new EC2 instance doesn't need to be launched. -There are some restrictions, however. The host EC2 instance where the -volume is attached to must be a similar system (generally the same OS -version, kernel versions, etc.) as the AMI being built. Additionally, -this process is much more expensive because the EC2 instance must be kept -running persistently in order to build AMIs, whereas the other AMI builders -start instances on-demand to build AMIs as needed. +There are some restrictions, however. 
The host EC2 instance where the volume is +attached to must be a similar system (generally the same OS version, kernel +versions, etc.) as the AMI being built. Additionally, this process is much more +expensive because the EC2 instance must be kept running persistently in order to +build AMIs, whereas the other AMI builders start instances on-demand to build +AMIs as needed. ## Configuration Reference @@ -52,107 +55,109 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. - If the environmental variables aren't set and Packer is running on - an EC2 instance, Packer will check the instance metadata for IAM role - keys. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -* `source_ami` (string) - The source AMI whose root volume will be copied - and provisioned on the currently running instance. 
This must be an - EBS-backed AMI with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed AMI + with a root volume snapshot that you have access to. ### Optional: -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. -* `ami_product_codes` (array of strings) - A list of product codes to - associate with the AMI. By default no product codes are associated with - the AMI. +- `ami_product_codes` (array of strings) - A list of product codes to associate + with the AMI. By default no product codes are associated with the AMI. -* `ami_regions` (array of strings) - A list of regions to copy the AMI to. - Tags and attributes are copied along with the AMI. AMI copying takes time - depending on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags + and attributes are copied along with the AMI. AMI copying takes time depending + on the size of the AMI, but will generally take many minutes. -* `ami_users` (array of strings) - A list of account IDs that have access - to launch the resulting AMI(s). By default no additional users other than the user - creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -* `ami_virtualization_type` (string) - The type of virtualization for the AMI +- `ami_virtualization_type` (string) - The type of virtualization for the AMI you are building. This option is required to register HVM images. Can be "paravirtual" (default) or "hvm". -* `chroot_mounts` (array of array of strings) - This is a list of additional +- `chroot_mounts` (array of array of strings) - This is a list of additional devices to mount into the chroot environment. This configuration parameter - requires some additional documentation which is in the "Chroot Mounts" section - below. Please read that section for more information on how to use this. + requires some additional documentation which is in the "Chroot Mounts" + section below. Please read that section for more information on how to + use this. -* `command_wrapper` (string) - How to run shell commands. This - defaults to "{{.Command}}". This may be useful to set if you want to set - environmental variables or perhaps run it with `sudo` or so on. This is a - configuration template where the `.Command` variable is replaced with the - command to be run. +- `command_wrapper` (string) - How to run shell commands. This defaults + to "{{.Command}}". This may be useful to set if you want to set environmental + variables or perhaps run it with `sudo` or so on. 
This is a configuration + template where the `.Command` variable is replaced with the command to be run. -* `copy_files` (array of strings) - Paths to files on the running EC2 instance - that will be copied into the chroot environment prior to provisioning. - This is useful, for example, to copy `/etc/resolv.conf` so that DNS lookups - work. +- `copy_files` (array of strings) - Paths to files on the running EC2 instance + that will be copied into the chroot environment prior to provisioning. This is + useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. -* `device_path` (string) - The path to the device where the root volume - of the source AMI will be attached. This defaults to "" (empty string), - which forces Packer to find an open device automatically. +- `device_path` (string) - The path to the device where the root volume of the + source AMI will be attached. This defaults to "" (empty string), which forces + Packer to find an open device automatically. -* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) + on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS + IAM policy. -* `force_deregister` (boolean) - Force Packer to first deregister an existing -AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -* `mount_path` (string) - The path where the volume will be mounted. This is +- `mount_path` (string) - The path where the volume will be mounted. This is where the chroot environment will be. This defaults to - `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration - template where the `.Device` variable is replaced with the name of the - device where the volume is attached. + `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template + where the `.Device` variable is replaced with the name of the device where the + volume is attached. -* `mount_options` (array of strings) - Options to supply the `mount` command -when mounting devices. Each option will be prefixed with `-o ` and supplied to -the `mount` command ran by Packer. Because this command is ran in a shell, user -discrestion is advised. See [this manual page for the mount command][1] for valid -file system specific options +- `mount_options` (array of strings) - Options to supply the `mount` command + when mounting devices. Each option will be prefixed with `-o` and supplied to + the `mount` command run by Packer. Because this command is run in a shell, + user discretion is advised. See [this manual page for the mount + command](http://linuxcommand.org/man_pages/mount8.html) for valid file system + specific options -* `root_volume_size` (integer) - The size of the root volume for the chroot -environment, and the resulting AMI +- `root_volume_size` (integer) - The size of the root volume for the chroot + environment, and the resulting AMI -* `tags` (object of key/value strings) - Tags applied to the AMI. +- `tags` (object of key/value strings) - Tags applied to the AMI. ## Basic Example Here is a basic example. It is completely valid except for the access keys: -```javascript +``` {.javascript} { "type": "amazon-chroot", "access_key": "YOUR KEY HERE", @@ -164,21 +169,21 @@ Here is a basic example.
It is completely valid except for the access keys: ## Chroot Mounts -The `chroot_mounts` configuration can be used to mount additional devices -within the chroot. By default, the following additional mounts are added -into the chroot by Packer: +The `chroot_mounts` configuration can be used to mount additional devices within +the chroot. By default, the following additional mounts are added into the +chroot by Packer: -* `/proc` (proc) -* `/sys` (sysfs) -* `/dev` (bind to real `/dev`) -* `/dev/pts` (devpts) -* `/proc/sys/fs/binfmt_misc` (binfmt_misc) +- `/proc` (proc) +- `/sys` (sysfs) +- `/dev` (bind to real `/dev`) +- `/dev/pts` (devpts) +- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) -These default mounts are usually good enough for anyone and are sane -defaults. However, if you want to change or add the mount points, you may -using the `chroot_mounts` configuration. Here is an example configuration: +These default mounts are usually good enough for anyone and are sane defaults. +However, if you want to change or add the mount points, you may do so using the +`chroot_mounts` configuration. Here is an example configuration: -```javascript +``` {.javascript} { "chroot_mounts": [ ["proc", "proc", "/proc"], @@ -187,25 +192,25 @@ using the `chroot_mounts` configuration. Here is an example configuration: } ``` -`chroot_mounts` is a list of a 3-tuples of strings. The three components -of the 3-tuple, in order, are: +`chroot_mounts` is a list of 3-tuples of strings. The three components of the +3-tuple, in order, are: -* The filesystem type. If this is "bind", then Packer will properly bind - the filesystem to another mount point. +- The filesystem type. If this is "bind", then Packer will properly bind the + filesystem to another mount point. -* The source device. +- The source device. -* The mount directory. +- The mount directory. ## Parallelism -A quick note on parallelism: it is perfectly safe to run multiple -_separate_ Packer processes with the `amazon-chroot` builder on the same -EC2 instance. In fact, this is recommended as a way to push the most performance -out of your AMI builds. +A quick note on parallelism: it is perfectly safe to run multiple *separate* +Packer processes with the `amazon-chroot` builder on the same EC2 instance. In +fact, this is recommended as a way to push the most performance out of your AMI +builds. -Packer properly obtains a process lock for the parallelism-sensitive parts -of its internals such as finding an available device. +Packer properly obtains a process lock for the parallelism-sensitive parts of +its internals such as finding an available device. ## Gotchas One of the difficulties with using the chroot builder is that your provisioning scripts must not leave any processes running or packer will be unable to unmount the filesystem.
-For debian based distributions you can setup a [policy-rc.d](http://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt) file which will -prevent packages installed by your provisioners from starting services: +For Debian-based distributions you can set up a +[policy-rc.d](http://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt) +file which will prevent packages installed by your provisioners from starting +services: -```javascript +``` {.javascript} { "type": "shell", "inline": [ @@ -235,6 +242,3 @@ prevent packages installed by your provisioners from starting services: ] } ``` - - -[1]: http://linuxcommand.org/man_pages/mount8.html diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 69a9a5c04..cb6b7c9d5 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -1,29 +1,32 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (EBS backed)" -description: |- - The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS volumes for use in EC2. For more information on the difference between EBS-backed instances and instance-store backed instances, see the storage for the root device section in the EC2 documentation. --- +description: | + The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS + volumes for use in EC2. For more information on the difference between + EBS-backed instances and instance-store backed instances, see the storage for + the root device section in the EC2 documentation. +layout: docs +page_title: 'Amazon AMI Builder (EBS backed)' +... # AMI Builder (EBS backed) Type: `amazon-ebs` The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS -volumes for use in [EC2](http://aws.amazon.com/ec2/). For more information -on the difference between EBS-backed instances and instance-store backed -instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +volumes for use in [EC2](http://aws.amazon.com/ec2/). For more information on +the difference between EBS-backed instances and instance-store backed instances, +see the ["storage for the root device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). This builder builds an AMI by launching an EC2 instance from a source AMI, provisioning that running machine, and then creating an AMI from that machine. This is all done in your own AWS account. The builder will create temporary -keypairs, security group rules, etc. that provide it temporary access to -the instance while the image is being created. This simplifies configuration -quite a bit. +keypairs, security group rules, etc. that provide it temporary access to the +instance while the image is being created. This simplifies configuration quite a +bit. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. ## Configuration Reference There are many configuration options available for this builder. They are segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized.
In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -* `source_ami` (string) - The initial AMI used as a base for the newly +- `source_ami` (string) - The initial AMI used as a base for the newly created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over SSH + to the running machine. ### Optional: -* `ami_block_device_mappings` (array of block device mappings) - Add the block +- `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh") - `virtual_name` (string) - The virtual device name. 
See the documentation on - [Block Device Mapping][1] for more information + [Block Device + Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) + for more information - `snapshot_id` (string) - The ID of the snapshot - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - volumes + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes - `volume_size` (integer) - The size of the volume, in GiB. Required if not - specifying a `snapshot_id` + specifying a `snapshot_id` - `delete_on_termination` (boolean) - Indicates whether the EBS volume is - deleted on instance termination + deleted on instance termination - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - `no_device` (boolean) - Suppresses the specified device included in the - block device mapping of the AMI + block device mapping of the AMI - `iops` (integer) - The number of I/O operations per second (IOPS) that the - volume supports. See the documentation on [IOPs][2] for more information + volume supports. See the documentation on + [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) + for more information +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. AWS currently doesn't accept any + value other than "all". -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_product_codes` (array of strings) - A list of product codes to associate + with the AMI. By default no product codes are associated with the AMI. -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. - AWS currently doesn't accept any value other than "all". +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags + and attributes are copied along with the AMI. AMI copying takes time depending + on the size of the AMI, but will generally take many minutes. -* `ami_product_codes` (array of strings) - A list of product codes to - associate with the AMI. By default no product codes are associated with - the AMI. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -* `ami_regions` (array of strings) - A list of regions to copy the AMI to. - Tags and attributes are copied along with the AMI. AMI copying takes time - depending on the size of the AMI, but will generally take many minutes. - -* `ami_users` (array of strings) - A list of account IDs that have access - to launch the resulting AMI(s). By default no additional users other than the user - creating the AMI has permissions to launch it. - -* `associate_public_ip_address` (boolean) - If using a non-default VPC, public +- `associate_public_ip_address` (boolean) - If using a non-default VPC, public IP addresses are not provided by default. If this is toggled, your new instance will get a Public IP. 
-* `availability_zone` (string) - Destination availability zone to launch instance in. - Leave this empty to allow Amazon to auto-assign. +- `availability_zone` (string) - Destination availability zone to launch + instance in. Leave this empty to allow Amazon to auto-assign. -* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) + on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS + IAM policy. -* `force_deregister` (boolean) - Force Packer to first deregister an existing -AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -* `iam_instance_profile` (string) - The name of an - [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) to launch the EC2 instance with. -* `launch_block_device_mappings` (array of block device mappings) - Add the +- `launch_block_device_mappings` (array of block device mappings) - Add the block device mappings to the launch instance. The block device mappings are the same as `ami_block_device_mappings` above. -* `run_tags` (object of key/value strings) - Tags to apply to the instance - that is _launched_ to create the AMI. These tags are _not_ applied to - the resulting AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance that + is *launched* to create the AMI. These tags are *not* applied to the resulting + AMI unless they're duplicated in `tags`. -* `security_group_id` (string) - The ID (_not_ the name) of the security - group to assign to the instance. By default this is not set and Packer - will automatically create a new temporary security group to allow SSH - access. Note that if this is specified, you must be sure the security - group allows access to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. Note + that if this is specified, you must be sure the security group allows access + to the `ssh_port` given below. -* `security_group_ids` (array of strings) - A list of security groups as +- `security_group_ids` (array of strings) - A list of security groups as described above. Note that if this is specified, you must omit the `security_group_id`. -* `spot_price` (string) - The maximum hourly price to pay for a spot instance - to create the AMI. Spot instances are a type of instance that EC2 starts when - the current spot price is less than the maximum price you specify. Spot price - will be updated based on available spot instance capacity and current spot +- `spot_price` (string) - The maximum hourly price to pay for a spot instance to + create the AMI. Spot instances are a type of instance that EC2 starts when the + current spot price is less than the maximum price you specify. Spot price will + be updated based on available spot instance capacity and current spot instance requests. It may save you some costs. 
You can set this to "auto" for
   Packer to automatically discover the best spot price.

-* `spot_price_auto_product` (string) - Required if `spot_price` is set to
-  "auto". This tells Packer what sort of AMI you're launching to find the best
-  spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
-  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
+- `spot_price_auto_product` (string) - Required if `spot_price` is set
+  to "auto". This tells Packer what sort of AMI you're launching to find the
+  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
+  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

-* `ssh_keypair_name` (string) - If specified, this is the key that will be
-  used for SSH with the machine. By default, this is blank, and Packer will
-  generate a temporary keypair. `ssh_private_key_file` must be specified
-  with this.
+- `ssh_keypair_name` (string) - If specified, this is the key that will be used
+  for SSH with the machine. By default, this is blank, and Packer will generate
+  a temporary keypair. `ssh_private_key_file` must be specified with this.

-* `ssh_private_ip` (boolean) - If true, then SSH will always use the private
-  IP if available.
+- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP
+  if available.

-* `subnet_id` (string) - If using VPC, the ID of the subnet, such as
+- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
   "subnet-12345def", where Packer will launch the EC2 instance. This field is
   required if you are using a non-default VPC.

-* `tags` (object of key/value strings) - Tags applied to the AMI and
+- `tags` (object of key/value strings) - Tags applied to the AMI and
   relevant snapshots.

-* `temporary_key_pair_name` (string) - The name of the temporary keypair
+- `temporary_key_pair_name` (string) - The name of the temporary keypair
   to generate. By default, Packer generates a name with a UUID.

-* `token` (string) - The access token to use. This is different from
-  the access key and secret key. If you're not sure what this is, then you
-  probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN`
+- `token` (string) - The access token to use. This is different from the access
+  key and secret key. If you're not sure what this is, then you probably don't
+  need it. This will also be read from the `AWS_SECURITY_TOKEN`
   environmental variable.

-* `user_data` (string) - User data to apply when launching the instance.
-  Note that you need to be careful about escaping characters due to the
-  templates being JSON. It is often more convenient to use `user_data_file`,
-  instead.
+- `user_data` (string) - User data to apply when launching the instance. Note
+  that you need to be careful about escaping characters due to the templates
+  being JSON. It is often more convenient to use `user_data_file` instead.

-* `user_data_file` (string) - Path to a file that will be used for the
-  user data when launching the instance.
+- `user_data_file` (string) - Path to a file that will be used for the user data
+  when launching the instance.

-* `vpc_id` (string) - If launching into a VPC subnet, Packer needs the
-  VPC ID in order to create a temporary security group within the VPC.
+- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in
+  order to create a temporary security group within the VPC.

-* `windows_password_timeout` (string) - The timeout for waiting for
-  a Windows password for Windows instances. Defaults to 20 minutes.
-  Example value: "10m"
+- `windows_password_timeout` (string) - The timeout for waiting for a Windows
+  password for Windows instances. Defaults to 20 minutes. Example value: "10m"

 ## Basic Example

 Here is a basic example. It is completely valid except for the access keys:

-```javascript
+``` {.javascript}
 {
   "type": "amazon-ebs",
   "access_key": "YOUR KEY HERE",
@@ -208,25 +214,23 @@ Here is a basic example. It is completely valid except for the access keys:
 }
 ```

--> **Note:** Packer can also read the access key and secret
-access key from environmental variables. See the configuration reference in
-the section above for more information on what environmental variables Packer
-will look for.
+-> **Note:** Packer can also read the access key and secret access key from
+environmental variables. See the configuration reference in the section above
+for more information on what environmental variables Packer will look for.

 ## Accessing the Instance to Debug

 If you need to access the instance to debug for some reason, run the builder
-with the `-debug` flag. In debug mode, the Amazon builder will save the
-private key in the current directory and will output the DNS or IP information
-as well. You can use this information to access the instance as it is
-running.
+with the `-debug` flag. In debug mode, the Amazon builder will save the private
+key in the current directory and will output the DNS or IP information as well.
+You can use this information to access the instance as it is running.

 ## AMI Block Device Mappings Example

 Here is an example using the optional AMI block device mappings. This will add
 the /dev/sdb and /dev/sdc block device mappings to the finished AMI.

-```javascript
+``` {.javascript}
 {
   "type": "amazon-ebs",
   "access_key": "YOUR KEY HERE",
@@ -252,9 +256,9 @@ the /dev/sdb and /dev/sdc block device mappings to the finished AMI.
 ## Tag Example

 Here is an example using the optional AMI tags. This will add the tags
-"OS_Version" and "Release" to the finished AMI.
+"OS\_Version" and "Release" to the finished AMI.

-```javascript
+``` {.javascript}
 {
   "type": "amazon-ebs",
   "access_key": "YOUR KEY HERE",
@@ -271,13 +275,10 @@ Here is an example using the optional AMI tags. This will add the tags
 }
 ```

--> **Note:** Packer uses pre-built AMIs as the source for building images.
+-> **Note:** Packer uses pre-built AMIs as the source for building images.
 These source AMIs may include volumes that are not flagged to be destroyed on
 termination of the instance building the new image. Packer will attempt to clean
 up all residual volumes that are not designated by the user to remain after
 termination. If you need to preserve those source volumes, you can overwrite
 the termination setting by specifying `delete_on_termination=false` in the
 `launch_block_device_mappings` block for the device.
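+
+For example, a minimal sketch of such an override (the device name here is an
+illustrative assumption; use the device name from your actual mapping):
+
+``` {.javascript}
+"launch_block_device_mappings": [
+  {
+    "device_name": "/dev/sda1",
+    "delete_on_termination": false
+  }
+]
+```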
- -[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html -[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index fa3c8a190..5ff36ccf2 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -1,9 +1,12 @@ --- -layout: "docs" -page_title: "Amazon AMI Builder (instance-store)" -description: |- - The `amazon-instance` Packer builder is able to create Amazon AMIs backed by instance storage as the root device. For more information on the difference between instance storage and EBS-backed instances, see the storage for the root device section in the EC2 documentation. ---- +description: | + The `amazon-instance` Packer builder is able to create Amazon AMIs backed by + instance storage as the root device. For more information on the difference + between instance storage and EBS-backed instances, see the storage for the root + device section in the EC2 documentation. +layout: docs +page_title: 'Amazon AMI Builder (instance-store)' +... # AMI Builder (instance-store) @@ -11,24 +14,24 @@ Type: `amazon-instance` The `amazon-instance` Packer builder is able to create Amazon AMIs backed by instance storage as the root device. For more information on the difference -between instance storage and EBS-backed instances, see the -["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). +between instance storage and EBS-backed instances, see the ["storage for the +root device" section in the EC2 +documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). This builder builds an AMI by launching an EC2 instance from an existing instance-storage backed AMI, provisioning that running machine, and then -bundling and creating a new AMI from that machine. -This is all done in your own AWS account. The builder will create temporary -keypairs, security group rules, etc. that provide it temporary access to -the instance while the image is being created. This simplifies configuration -quite a bit. +bundling and creating a new AMI from that machine. This is all done in your own +AWS account. The builder will create temporary keypairs, security group rules, +etc. that provide it temporary access to the instance while the image is being +created. This simplifies configuration quite a bit. -The builder does _not_ manage AMIs. Once it creates an AMI and stores it -in your account, it is up to you to use, delete, etc. the AMI. +The builder does *not* manage AMIs. Once it creates an AMI and stores it in your +account, it is up to you to use, delete, etc. the AMI. --> **Note** This builder requires that the -[Amazon EC2 AMI Tools](http://aws.amazon.com/developertools/368) -are installed onto the machine. This can be done within a provisioner, but -must be done before the builder finishes running. +-> **Note** This builder requires that the [Amazon EC2 AMI +Tools](http://aws.amazon.com/developertools/368) are installed onto the machine. +This can be done within a provisioner, but must be done before the builder +finishes running. ## Configuration Reference @@ -37,204 +40,207 @@ segmented below into two categories: required and optional parameters. 
Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -* `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is _not the same_ as the access key. You can find your - account ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. - This bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. - If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file - or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. 
-* `source_ami` (string) - The initial AMI used as a base for the newly +- `source_ami` (string) - The initial AMI used as a base for the newly created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over SSH + to the running machine. -* `x509_cert_path` (string) - The local path to a valid X509 certificate for +- `x509_cert_path` (string) - The local path to a valid X509 certificate for your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page - in the AWS console. + must be registered with your account from the security credentials page in the + AWS console. -* `x509_key_path` (string) - The local path to the private key for the X509 +- `x509_key_path` (string) - The local path to the private key for the X509 certificate specified by `x509_cert_path`. This is used for bundling the AMI. ### Optional: -* `ami_block_device_mappings` (array of block device mappings) - Add the block +- `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh") - `virtual_name` (string) - The virtual device name. See the documentation on - [Block Device Mapping][1] for more information + [Block Device + Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) + for more information - `snapshot_id` (string) - The ID of the snapshot - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - volumes + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes - `volume_size` (integer) - The size of the volume, in GiB. Required if not - specifying a `snapshot_id` + specifying a `snapshot_id` - `delete_on_termination` (boolean) - Indicates whether the EBS volume is - deleted on instance termination + deleted on instance termination - `encrypted` (boolean) - Indicates whether to encrypt the volume or not - `no_device` (boolean) - Suppresses the specified device included in the - block device mapping of the AMI + block device mapping of the AMI - `iops` (integer) - The number of I/O operations per second (IOPS) that the - volume supports. See the documentation on [IOPs][2] for more information + volume supports. See the documentation on + [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) + for more information +- `ami_description` (string) - The description to set for the resulting AMI(s). + By default this description is empty. -* `ami_description` (string) - The description to set for the resulting - AMI(s). By default this description is empty. +- `ami_groups` (array of strings) - A list of groups that have access to launch + the resulting AMI(s). By default no groups have permission to launch the AMI. + `all` will make the AMI publicly accessible. AWS currently doesn't accept any + value other than "all". -* `ami_groups` (array of strings) - A list of groups that have access - to launch the resulting AMI(s). By default no groups have permission - to launch the AMI. `all` will make the AMI publicly accessible. - AWS currently doesn't accept any value other than "all". 
+- `ami_product_codes` (array of strings) - A list of product codes to associate
+  with the AMI. By default no product codes are associated with the AMI.

-* `ami_product_codes` (array of strings) - A list of product codes to
-  associate with the AMI. By default no product codes are associated with
-  the AMI.
+- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags
+  and attributes are copied along with the AMI. AMI copying takes time depending
+  on the size of the AMI, but will generally take many minutes.

-* `ami_regions` (array of strings) - A list of regions to copy the AMI to.
-  Tags and attributes are copied along with the AMI. AMI copying takes time
-  depending on the size of the AMI, but will generally take many minutes.
+- `ami_users` (array of strings) - A list of account IDs that have access to
+  launch the resulting AMI(s). By default no additional users other than the
+  user creating the AMI have permission to launch it.

-* `ami_users` (array of strings) - A list of account IDs that have access
-  to launch the resulting AMI(s). By default no additional users other than the user
-  creating the AMI has permissions to launch it.
-
-* `ami_virtualization_type` (string) - The type of virtualization for the AMI
+- `ami_virtualization_type` (string) - The type of virtualization for the AMI
   you are building. This option is required to register HVM images. Can be
   "paravirtual" (default) or "hvm".

-* `associate_public_ip_address` (boolean) - If using a non-default VPC, public
+- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
   IP addresses are not provided by default. If this is toggled, your new
-  instance will get a Public IP.
+  instance will get a Public IP.

-* `availability_zone` (string) - Destination availability zone to launch instance in.
-  Leave this empty to allow Amazon to auto-assign.
+- `availability_zone` (string) - Destination availability zone to launch
+  instance in. Leave this empty to allow Amazon to auto-assign.

-* `bundle_destination` (string) - The directory on the running instance
-  where the bundled AMI will be saved prior to uploading. By default this is
-  "/tmp". This directory must exist and be writable.
+- `bundle_destination` (string) - The directory on the running instance where
+  the bundled AMI will be saved prior to uploading. By default this is "/tmp".
+  This directory must exist and be writable.

-* `bundle_prefix` (string) - The prefix for files created from bundling
-  the root volume. By default this is "image-{{timestamp}}". The `timestamp`
-  variable should be used to make sure this is unique, otherwise it can
-  collide with other created AMIs by Packer in your account.
+- `bundle_prefix` (string) - The prefix for files created from bundling the
+  root volume. By default this is "image-{{timestamp}}". The `timestamp`
+  variable should be used to make sure this is unique, otherwise it can collide
+  with other AMIs created by Packer in your account.

-* `bundle_upload_command` (string) - The command to use to upload the
-  bundled volume. See the "custom bundle commands" section below for more
-  information.
+- `bundle_upload_command` (string) - The command to use to upload the
+  bundled volume. See the "custom bundle commands" section below for
+  more information.

-* `bundle_vol_command` (string) - The command to use to bundle the volume.
See
+  the "custom bundle commands" section below for more information.

-* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
-  HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
+- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport)
+  on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS
+  IAM policy.

-* `force_deregister` (boolean) - Force Packer to first deregister an existing
-AMI if one with the same name already exists. Default `false`.
+- `force_deregister` (boolean) - Force Packer to first deregister an existing
+  AMI if one with the same name already exists. Default `false`.

-* `iam_instance_profile` (string) - The name of an
-  [IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
+- `iam_instance_profile` (string) - The name of an [IAM instance
+  profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
   to launch the EC2 instance with.

-* `launch_block_device_mappings` (array of block device mappings) - Add the
+- `launch_block_device_mappings` (array of block device mappings) - Add the
   block device mappings to the launch instance. The block device mappings are
   the same as `ami_block_device_mappings` above.

-* `run_tags` (object of key/value strings) - Tags to apply to the instance
-  that is _launched_ to create the AMI. These tags are _not_ applied to
-  the resulting AMI unless they're duplicated in `tags`.
+- `run_tags` (object of key/value strings) - Tags to apply to the instance that
+  is *launched* to create the AMI. These tags are *not* applied to the resulting
+  AMI unless they're duplicated in `tags`.

-* `security_group_id` (string) - The ID (_not_ the name) of the security
-  group to assign to the instance. By default this is not set and Packer
-  will automatically create a new temporary security group to allow SSH
-  access. Note that if this is specified, you must be sure the security
-  group allows access to the `ssh_port` given below.
+- `security_group_id` (string) - The ID (*not* the name) of the security group
+  to assign to the instance. By default this is not set and Packer will
+  automatically create a new temporary security group to allow SSH access. Note
+  that if this is specified, you must be sure the security group allows access
+  to the `ssh_port` given below.

-* `security_group_ids` (array of strings) - A list of security groups as
+- `security_group_ids` (array of strings) - A list of security groups as
   described above. Note that if this is specified, you must omit the
   `security_group_id`.

-* `spot_price` (string) - The maximum hourly price to launch a spot instance
-  to create the AMI. It is a type of instances that EC2 starts when the maximum
+- `spot_price` (string) - The maximum hourly price to launch a spot instance to
+  create the AMI. Spot instances are a type of instance that EC2 starts when the maximum
   price that you specify exceeds the current spot price. Spot price will be
-  updated based on available spot instance capacity and current spot Instance
-  requests. It may save you some costs. You can set this to "auto" for
+  updated based on available spot instance capacity and current spot
+  instance requests. It may save you some costs. You can set this to "auto" for
   Packer to automatically discover the best spot price.

-* `spot_price_auto_product` (string) - Required if `spot_price` is set to
-  "auto".
This tells Packer what sort of AMI you're launching to find the best
-  spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
-  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
+- `spot_price_auto_product` (string) - Required if `spot_price` is set
+  to "auto". This tells Packer what sort of AMI you're launching to find the
+  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
+  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

-* `ssh_keypair_name` (string) - If specified, this is the key that will be
-  used for SSH with the machine. By default, this is blank, and Packer will
-  generate a temporary keypair. `ssh_private_key_file` must be specified
-  with this.
+- `ssh_keypair_name` (string) - If specified, this is the key that will be used
+  for SSH with the machine. By default, this is blank, and Packer will generate
+  a temporary keypair. `ssh_private_key_file` must be specified with this.

-* `ssh_private_ip` (boolean) - If true, then SSH will always use the private
-  IP if available.
+- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP
+  if available.

-* `subnet_id` (string) - If using VPC, the ID of the subnet, such as
+- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
   "subnet-12345def", where Packer will launch the EC2 instance. This field is
   required if you are using a non-default VPC.

-* `tags` (object of key/value strings) - Tags applied to the AMI.
+- `tags` (object of key/value strings) - Tags applied to the AMI.

-* `temporary_key_pair_name` (string) - The name of the temporary keypair
+- `temporary_key_pair_name` (string) - The name of the temporary keypair
   to generate. By default, Packer generates a name with a UUID.

-* `user_data` (string) - User data to apply when launching the instance.
-  Note that you need to be careful about escaping characters due to the
-  templates being JSON. It is often more convenient to use `user_data_file`,
-  instead.
+- `user_data` (string) - User data to apply when launching the instance. Note
+  that you need to be careful about escaping characters due to the templates
+  being JSON. It is often more convenient to use `user_data_file` instead.

-* `user_data_file` (string) - Path to a file that will be used for the
-  user data when launching the instance.
+- `user_data_file` (string) - Path to a file that will be used for the user data
+  when launching the instance.

-* `vpc_id` (string) - If launching into a VPC subnet, Packer needs the
-  VPC ID in order to create a temporary security group within the VPC.
+- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in
+  order to create a temporary security group within the VPC.

-* `x509_upload_path` (string) - The path on the remote machine where the
-  X509 certificate will be uploaded. This path must already exist and be
-  writable. X509 certificates are uploaded after provisioning is run, so
-  it is perfectly okay to create this directory as part of the provisioning
-  process.
+- `x509_upload_path` (string) - The path on the remote machine where the X509
+  certificate will be uploaded. This path must already exist and be writable.
+  X509 certificates are uploaded after provisioning is run, so it is perfectly
+  okay to create this directory as part of the provisioning process.

-* `windows_password_timeout` (string) - The timeout for waiting for
-  a Windows password for Windows instances. Defaults to 20 minutes.
-  Example value: "10m"
+- `windows_password_timeout` (string) - The timeout for waiting for a Windows
+  password for Windows instances. Defaults to 20 minutes. Example value: "10m"

 ## Basic Example

 Here is a basic example. It is completely valid except for the access keys:

-```javascript
+``` {.javascript}
 {
   "type": "amazon-instance",
   "access_key": "YOUR KEY HERE",
@@ -254,84 +260,79 @@ Here is a basic example. It is completely valid except for the access keys:
 }
 ```

--> **Note:** Packer can also read the access key and secret
-access key from environmental variables. See the configuration reference in
-the section above for more information on what environmental variables Packer
-will look for.
+-> **Note:** Packer can also read the access key and secret access key from
+environmental variables. See the configuration reference in the section above
+for more information on what environmental variables Packer will look for.

 ## Accessing the Instance to Debug

 If you need to access the instance to debug for some reason, run the builder
-with the `-debug` flag. In debug mode, the Amazon builder will save the
-private key in the current directory and will output the DNS or IP information
-as well. You can use this information to access the instance as it is
-running.
+with the `-debug` flag. In debug mode, the Amazon builder will save the private
+key in the current directory and will output the DNS or IP information as well.
+You can use this information to access the instance as it is running.

 ## Custom Bundle Commands

-A lot of the process required for creating an instance-store backed AMI
-involves commands being run on the actual source instance. Specifically, the
-`ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle
-the root filesystem and upload it, respectively.
+A lot of the process required for creating an instance-store backed AMI involves
+commands being run on the actual source instance. Specifically, the
+`ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle the
+root filesystem and upload it, respectively.

 Each of these commands has a lot of available flags. Instead of exposing each
-possible flag as a template configuration option, the instance-store AMI
-builder for Packer lets you customize the entire command used to bundle
-and upload the AMI.
+possible flag as a template configuration option, the instance-store AMI builder
+for Packer lets you customize the entire command used to bundle and upload the
+AMI.

-These are configured with `bundle_vol_command` and `bundle_upload_command`.
-Both of these configurations are
-[configuration templates](/docs/templates/configuration-templates.html)
-and have support for their own set of template variables.
+These are configured with `bundle_vol_command` and `bundle_upload_command`. Both
+of these configurations are [configuration
+templates](/docs/templates/configuration-templates.html) and have support for
+their own set of template variables.

 ### Bundle Volume Command

-The default value for `bundle_vol_command` is shown below. It is split
-across multiple lines for convenience of reading. The bundle volume command
-is responsible for executing `ec2-bundle-vol` in order to store and image
-of the root filesystem to use to create the AMI.
+The default value for `bundle_vol_command` is shown below. It is split across
+multiple lines for convenience of reading.
The bundle volume command is
+responsible for executing `ec2-bundle-vol` in order to store an image of the
+root filesystem to use to create the AMI.

-```text
+``` {.text}
 sudo -i -n ec2-bundle-vol \
-  -k {{.KeyPath}} \
-  -u {{.AccountId}} \
-  -c {{.CertPath}} \
-  -r {{.Architecture}} \
-  -e {{.PrivatePath}}/* \
-  -d {{.Destination}} \
-  -p {{.Prefix}} \
-  --batch \
-  --no-filter
+    -k {{.KeyPath}} \
+    -u {{.AccountId}} \
+    -c {{.CertPath}} \
+    -r {{.Architecture}} \
+    -e {{.PrivatePath}}/* \
+    -d {{.Destination}} \
+    -p {{.Prefix}} \
+    --batch \
+    --no-filter
 ```

 The available template variables should be self-explanatory based on the
 parameters they're used to satisfy the `ec2-bundle-vol` command.

-~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and
+~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and
 .gpg files during the bundling of the AMI, which can cause problems on some
-systems, such as Ubuntu. You may want to customize the bundle volume command
-to include those files (see the `--no-filter` option of ec2-bundle-vol).
+systems, such as Ubuntu. You may want to customize the bundle volume command to
+include those files (see the `--no-filter` option of ec2-bundle-vol).

 ### Bundle Upload Command

-The default value for `bundle_upload_command` is shown below. It is split
-across multiple lines for convenience of reading. The bundle upload command
-is responsible for taking the bundled volume and uploading it to S3.
+The default value for `bundle_upload_command` is shown below. It is split across
+multiple lines for convenience of reading. The bundle upload command is
+responsible for taking the bundled volume and uploading it to S3.

-```text
+``` {.text}
 sudo -i -n ec2-upload-bundle \
-  -b {{.BucketName}} \
-  -m {{.ManifestPath}} \
-  -a {{.AccessKey}} \
-  -s {{.SecretKey}} \
-  -d {{.BundleDirectory}} \
-  --batch \
-  --region {{.Region}} \
-  --retry
+    -b {{.BucketName}} \
+    -m {{.ManifestPath}} \
+    -a {{.AccessKey}} \
+    -s {{.SecretKey}} \
+    -d {{.BundleDirectory}} \
+    --batch \
+    --region {{.Region}} \
+    --retry
 ```

 The available template variables should be self-explanatory based on the
 parameters they're used to satisfy the `ec2-upload-bundle` command.
-
-[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
-[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown
index ad336ad1c..69b4e509b 100644
--- a/website/source/docs/builders/amazon.html.markdown
+++ b/website/source/docs/builders/amazon.html.markdown
@@ -1,44 +1,47 @@
 ---
-layout: "docs"
-page_title: "Amazon AMI Builder"
-description: |-
-  Packer is able to create Amazon AMIs. To achieve this, Packer comes with multiple builders depending on the strategy you want to use to build the AMI.
----
+description: |
+    Packer is able to create Amazon AMIs. To achieve this, Packer comes with
+    multiple builders depending on the strategy you want to use to build the AMI.
+layout: docs
+page_title: Amazon AMI Builder
+...

 # Amazon AMI Builder

 Packer is able to create Amazon AMIs. To achieve this, Packer comes with
-multiple builders depending on the strategy you want to use to build the
-AMI. Packer supports the following builders at the moment:
+multiple builders depending on the strategy you want to use to build the AMI.
+Packer supports the following builders at the moment:

-* [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs
-  by launching a source AMI and re-packaging it into a new AMI after
-  provisioning. If in doubt, use this builder, which is the easiest to get
-  started with.
+- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by
+  launching a source AMI and re-packaging it into a new AMI after provisioning.
+  If in doubt, use this builder, which is the easiest to get started with.

-* [amazon-instance](/docs/builders/amazon-instance.html) - Create
-  instance-store AMIs by launching and provisioning a source instance, then
-  rebundling it and uploading it to S3.
+- [amazon-instance](/docs/builders/amazon-instance.html) - Create instance-store
+  AMIs by launching and provisioning a source instance, then rebundling it and
+  uploading it to S3.

-* [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs
+- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs
   from an existing EC2 instance by mounting the root device and using a
   [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision
   that device. This is an **advanced builder and should not be used by
-  newcomers**. However, it is also the fastest way to build an EBS-backed
-  AMI since no new EC2 instance needs to be launched.
+  newcomers**. However, it is also the fastest way to build an EBS-backed AMI
+  since no new EC2 instance needs to be launched.

--> **Don't know which builder to use?** If in doubt, use the
-[amazon-ebs builder](/docs/builders/amazon-ebs.html). It is
-much easier to use and Amazon generally recommends EBS-backed images nowadays.
+-> **Don't know which builder to use?** If in doubt, use the [amazon-ebs
+builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon
+generally recommends EBS-backed images nowadays.

 ## Using an IAM Instance Profile

-If AWS keys are not specified in the template, a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file or through environment variables
-Packer will use credentials provided by the instance's IAM profile, if it has one.
+If AWS keys are not specified in the template, in a
+[credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files)
+file, or through environment variables, Packer will use credentials provided by
+the instance's IAM profile, if it has one.

-The following policy document provides the minimal set permissions necessary for Packer to work:
+The following policy document provides the minimal set of permissions necessary
+for Packer to work:

-```javascript
+``` {.javascript}
 {
   "Statement": [{
     "Effect": "Allow",
diff --git a/website/source/docs/builders/custom.html.markdown b/website/source/docs/builders/custom.html.markdown
index a737c1bd1..dc6928d4b 100644
--- a/website/source/docs/builders/custom.html.markdown
+++ b/website/source/docs/builders/custom.html.markdown
@@ -1,13 +1,15 @@
 ---
-layout: "docs"
-page_title: "Custom Builder"
-description: |-
-  Packer is extensible, allowing you to write new builders without having to modify the core source code of Packer itself. Documentation for creating new builders is covered in the custom builders page of the Packer plugin section.
----
+description: |
+    Packer is extensible, allowing you to write new builders without having to
+    modify the core source code of Packer itself.
Documentation for creating new + builders is covered in the custom builders page of the Packer plugin section. +layout: docs +page_title: Custom Builder +... # Custom Builder Packer is extensible, allowing you to write new builders without having to -modify the core source code of Packer itself. Documentation for creating -new builders is covered in the [custom builders](/docs/extend/builder.html) -page of the Packer plugin section. +modify the core source code of Packer itself. Documentation for creating new +builders is covered in the [custom builders](/docs/extend/builder.html) page of +the Packer plugin section. diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown index c9ef3b315..b20523944 100644 --- a/website/source/docs/builders/digitalocean.html.markdown +++ b/website/source/docs/builders/digitalocean.html.markdown @@ -1,22 +1,26 @@ --- -layout: "docs" -page_title: "DigitalOcean Builder" -description: |- - The `digitalocean` Packer builder is able to create new images for use with DigitalOcean. The builder takes a source image, runs any provisioning necessary on the image after launching it, then snapshots it into a reusable image. This reusable image can then be used as the foundation of new servers that are launched within DigitalOcean. ---- +description: | + The `digitalocean` Packer builder is able to create new images for use with + DigitalOcean. The builder takes a source image, runs any provisioning necessary + on the image after launching it, then snapshots it into a reusable image. This + reusable image can then be used as the foundation of new servers that are + launched within DigitalOcean. +layout: docs +page_title: DigitalOcean Builder +... # DigitalOcean Builder Type: `digitalocean` The `digitalocean` Packer builder is able to create new images for use with -[DigitalOcean](http://www.digitalocean.com). The builder takes a source -image, runs any provisioning necessary on the image after launching it, -then snapshots it into a reusable image. This reusable image can then be -used as the foundation of new servers that are launched within DigitalOcean. +[DigitalOcean](http://www.digitalocean.com). The builder takes a source image, +runs any provisioning necessary on the image after launching it, then snapshots +it into a reusable image. This reusable image can then be used as the foundation +of new servers that are launched within DigitalOcean. -The builder does _not_ manage images. Once it creates an image, it is up to -you to use it or delete it. +The builder does *not* manage images. Once it creates an image, it is up to you +to use it or delete it. ## Configuration Reference @@ -25,50 +29,53 @@ segmented below into two categories: required and optional parameters. Within each category, the available configuration keys are alphabetized. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `api_token` (string) - The client TOKEN to use to access your account. - It can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. +- `api_token` (string) - The client TOKEN to use to access your account. It can + also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. -* `image` (string) - The name (or slug) of the base image to use. 
This is the
-  image that will be used to launch a new droplet and provision it.
-  See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the the accepted image names/slugs.
+- `image` (string) - The name (or slug) of the base image to use. This is the
+  image that will be used to launch a new droplet and provision it. See
+  https://developers.digitalocean.com/documentation/v2/#list-all-images for
+  details on how to get a list of the accepted image names/slugs.

-* `region` (string) - The name (or slug) of the region to launch the droplet in.
-  Consequently, this is the region where the snapshot will be available.
-  See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs.
+- `region` (string) - The name (or slug) of the region to launch the droplet in.
+  Consequently, this is the region where the snapshot will be available. See
+  https://developers.digitalocean.com/documentation/v2/#list-all-regions for
+  the accepted region names/slugs.

-* `size` (string) - The name (or slug) of the droplet size to use.
-  See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs.
+- `size` (string) - The name (or slug) of the droplet size to use. See
+  https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the
+  accepted size names/slugs.

 ### Optional:

-* `droplet_name` (string) - The name assigned to the droplet. DigitalOcean
-  sets the hostname of the machine to this value.
+- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean sets
+  the hostname of the machine to this value.

-* `private_networking` (boolean) - Set to `true` to enable private networking
+- `private_networking` (boolean) - Set to `true` to enable private networking
   for the droplet being created. This defaults to `false`, or not enabled.

-* `snapshot_name` (string) - The name of the resulting snapshot that will
-  appear in your account. This must be unique.
-  To help make this unique, use a function like `timestamp` (see
-  [configuration templates](/docs/templates/configuration-templates.html) for more info)
+- `snapshot_name` (string) - The name of the resulting snapshot that will appear
+  in your account. This must be unique. To help make this unique, use a function
+  like `timestamp` (see [configuration
+  templates](/docs/templates/configuration-templates.html) for more info)

-* `state_timeout` (string) - The time to wait, as a duration string,
-  for a droplet to enter a desired state (such as "active") before
-  timing out. The default state timeout is "6m".
+- `state_timeout` (string) - The time to wait, as a duration string, for a
+  droplet to enter a desired state (such as "active") before timing out. The
+  default state timeout is "6m".

-* `user_data` (string) - User data to launch with the Droplet.
+- `user_data` (string) - User data to launch with the Droplet.

 ## Basic Example

-Here is a basic example. It is completely valid as soon as you enter your
-own access tokens:
+Here is a basic example.
It is completely valid as soon as you enter your own +access tokens: -```javascript +``` {.javascript} { "type": "digitalocean", "api_token": "YOUR API KEY", diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index b5fe95075..b2fab5b19 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -1,39 +1,40 @@ --- -layout: "docs" -page_title: "Docker Builder" -description: |- - The `docker` Packer builder builds Docker images using Docker. The builder starts a Docker container, runs provisioners within this container, then exports the container for reuse or commits the image. ---- +description: | + The `docker` Packer builder builds Docker images using Docker. The builder + starts a Docker container, runs provisioners within this container, then exports + the container for reuse or commits the image. +layout: docs +page_title: Docker Builder +... # Docker Builder Type: `docker` The `docker` Packer builder builds [Docker](http://www.docker.io) images using -Docker. The builder starts a Docker container, runs provisioners within -this container, then exports the container for reuse or commits the image. +Docker. The builder starts a Docker container, runs provisioners within this +container, then exports the container for reuse or commits the image. -Packer builds Docker containers _without_ the use of -[Dockerfiles](https://docs.docker.com/reference/builder/). -By not using Dockerfiles, Packer is able to provision -containers with portable scripts or configuration management systems -that are not tied to Docker in any way. It also has a simpler mental model: -you provision containers much the same way you provision a normal virtualized -or dedicated server. For more information, read the section on -[Dockerfiles](#toc_8). +Packer builds Docker containers *without* the use of +[Dockerfiles](https://docs.docker.com/reference/builder/). By not using +Dockerfiles, Packer is able to provision containers with portable scripts or +configuration management systems that are not tied to Docker in any way. It also +has a simpler mental model: you provision containers much the same way you +provision a normal virtualized or dedicated server. For more information, read +the section on [Dockerfiles](#toc_8). The Docker builder must run on a machine that has Docker installed. Therefore the builder only works on machines that support Docker (modern Linux machines). -If you want to use Packer to build Docker containers on another platform, -use [Vagrant](http://www.vagrantup.com) to start a Linux environment, then -run Packer within that environment. +If you want to use Packer to build Docker containers on another platform, use +[Vagrant](http://www.vagrantup.com) to start a Linux environment, then run +Packer within that environment. ## Basic Example: Export -Below is a fully functioning example. It doesn't do anything useful, since -no provisioners are defined, but it will effectively repackage an image. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will effectively repackage an image. -```javascript +``` {.javascript} { "type": "docker", "image": "ubuntu", @@ -43,11 +44,11 @@ no provisioners are defined, but it will effectively repackage an image. ## Basic Example: Commit -Below is another example, the same as above but instead of exporting the -running container, this one commits the container to an image. 
The image -can then be more easily tagged, pushed, etc. +Below is another example, the same as above but instead of exporting the running +container, this one commits the container to an image. The image can then be +more easily tagged, pushed, etc. -```javascript +``` {.javascript} { "type": "docker", "image": "ubuntu", @@ -55,7 +56,6 @@ can then be more easily tagged, pushed, etc. } ``` - ## Configuration Reference Configuration options are organized below into two categories: required and @@ -63,47 +63,47 @@ optional. Within each category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `commit` (boolean) - If true, the container will be committed to an - image rather than exported. This cannot be set if `export_path` is set. +- `commit` (boolean) - If true, the container will be committed to an image + rather than exported. This cannot be set if `export_path` is set. -* `export_path` (string) - The path where the final container will be exported +- `export_path` (string) - The path where the final container will be exported as a tar file. This cannot be set if `commit` is set to true. -* `image` (string) - The base image for the Docker container that will - be started. This image will be pulled from the Docker registry if it - doesn't already exist. +- `image` (string) - The base image for the Docker container that will + be started. This image will be pulled from the Docker registry if it doesn't + already exist. ### Optional: -* `login` (boolean) - Defaults to false. If true, the builder will - login in order to pull the image. The builder only logs in for the - duration of the pull. It always logs out afterwards. +- `login` (boolean) - Defaults to false. If true, the builder will login in + order to pull the image. The builder only logs in for the duration of + the pull. It always logs out afterwards. -* `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use to authenticate to login. -* `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use to authenticate to login. -* `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use to authenticate to login. -* `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to login to. -* `pull` (boolean) - If true, the configured image will be pulled using - `docker pull` prior to use. Otherwise, it is assumed the image already - exists and can be used. This defaults to true if not set. +- `pull` (boolean) - If true, the configured image will be pulled using + `docker pull` prior to use. Otherwise, it is assumed the image already exists + and can be used. This defaults to true if not set. -* `run_command` (array of strings) - An array of arguments to pass to +- `run_command` (array of strings) - An array of arguments to pass to `docker run` in order to run the container. By default this is set to - `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. - As you can see, you have a couple template variables to customize, as well. + `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a + couple template variables to customize, as well. 

-* `volumes` (map of strings to strings) - A mapping of additional volumes
-  to mount into this container. The key of the object is the host path,
-  the value is the container path.
+- `volumes` (map of strings to strings) - A mapping of additional volumes to
+  mount into this container. The key of the object is the host path, the value
+  is the container path.

 ## Using the Artifact: Export

 In addition to the artifact being available locally
 with the [docker-import](/docs/post-processors/docker-import.html) and
 [docker-push](/docs/post-processors/docker-push.html) post-processors.

 **Note:** This section is covering how to use an artifact that has been
-_exported_. More specifically, if you set `export_path` in your configuration.
+*exported*. More specifically, if you set `export_path` in your configuration.
 If you set `commit`, see the next section.

-The example below shows a full configuration that would import and push
-the created image. This is accomplished using a sequence definition (a
-collection of post-processors that are treated as as single pipeline, see
-[Post-Processors](/docs/templates/post-processors.html)
-for more information):
+The example below shows a full configuration that would import and push the
+created image. This is accomplished using a sequence definition (a collection of
+post-processors that are treated as a single pipeline, see
+[Post-Processors](/docs/templates/post-processors.html) for more information):

-```javascript
+``` {.javascript}
 {
   "post-processors": [
-    [
-      {
-        "type": "docker-import",
-        "repository": "mitchellh/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ]
-  ]
+  [
+    {
+      "type": "docker-import",
+      "repository": "mitchellh/packer",
+      "tag": "0.7"
+    },
+    "docker-push"
+  ]
+  ]
 }
 ```

@@ -143,10 +142,10 @@ post-processor which will import the artifact as a docker image.
 The resulting docker image is then passed on to the `docker-push`
 post-processor which handles pushing the image to a container repository.

-If you want to do this manually, however, perhaps from a script, you can
-import the image using the process below:
+If you want to do this manually, however, perhaps from a script, you can import
+the image using the process below:

-```text
+``` {.text}
 $ docker import - registry.mydomain.com/mycontainer:latest < artifact.tar
 ```

@@ -157,23 +156,22 @@ and `docker push`, respectively.

 If you committed your container to an image, you probably want to tag, save,
 push, etc. Packer can do this automatically for you. An example is shown below
-which tags and pushes an image. This is accomplished using a sequence
-definition (a collection of post-processors that are treated as as single
-pipeline, see [Post-Processors](/docs/templates/post-processors.html) for more
-information):
+which tags and pushes an image.
This is accomplished using a sequence definition
+(a collection of post-processors that are treated as a single pipeline, see
+[Post-Processors](/docs/templates/post-processors.html) for more information):
 
-```javascript
+``` {.javascript}
 {
   "post-processors": [
-    [
-      {
-        "type": "docker-tag",
-        "repository": "mitchellh/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ]
-  ]
+      [
+        {
+          "type": "docker-tag",
+          "repository": "mitchellh/packer",
+          "tag": "0.7"
+        },
+        "docker-push"
+      ]
+    ]
 }
 ```
 
@@ -187,52 +185,52 @@ Going a step further, if you wanted to tag and push an image to multiple
 container repositories, this could be accomplished by defining two,
 nearly-identical sequence definitions, as demonstrated by the example below:
 
-```javascript
+``` {.javascript}
 {
-  "post-processors": [
-    [
-      {
-        "type": "docker-tag",
-        "repository": "mitchellh/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ],
-    [
-      {
-        "type": "docker-tag",
-        "repository": "hashicorp/packer",
-        "tag": "0.7"
-      },
-      "docker-push"
-    ]
-  ]
+  "post-processors": [
+    [
+      {
+        "type": "docker-tag",
+        "repository": "mitchellh/packer",
+        "tag": "0.7"
+      },
+      "docker-push"
+    ],
+    [
+      {
+        "type": "docker-tag",
+        "repository": "hashicorp/packer",
+        "tag": "0.7"
+      },
+      "docker-push"
+    ]
+  ]
 }
 ```
 
 ## Dockerfiles
 
-This builder allows you to build Docker images _without_ Dockerfiles.
+This builder allows you to build Docker images *without* Dockerfiles.
 
-With this builder, you can repeatably create Docker images without the use of
-a Dockerfile. You don't need to know the syntax or semantics of Dockerfiles.
+With this builder, you can repeatably create Docker images without the use of a
+Dockerfile. You don't need to know the syntax or semantics of Dockerfiles.
 Instead, you can just provide shell scripts, Chef recipes, Puppet manifests,
 etc. to provision your Docker container just like you would a regular
 virtualized or dedicated machine.
 
-While Docker has many features, Packer views Docker simply as an LXC
-container runner. To that end, Packer is able to repeatably build these
-LXC containers using portable provisioning scripts.
+While Docker has many features, Packer views Docker simply as an LXC container
+runner. To that end, Packer is able to repeatably build these LXC containers
+using portable provisioning scripts.
 
-Dockerfiles have some additional features that Packer doesn't support
-which are able to be worked around. Many of these features will be automated
-by Packer in the future:
+Dockerfiles have some additional features that Packer doesn't support which can
+be worked around. Many of these features will be automated by Packer in
+the future:
 
-* Dockerfiles will snapshot the container at each step, allowing you to
-  go back to any step in the history of building. Packer doesn't do this yet,
-  but inter-step snapshotting is on the way.
+- Dockerfiles will snapshot the container at each step, allowing you to go back
+  to any step in the history of building. Packer doesn't do this yet, but
+  inter-step snapshotting is on the way.
 
-* Dockerfiles can contain information such as exposed ports, shared
-  volumes, and other metadata. Packer builds a raw Docker container image
-  that has none of this metadata. You can pass in much of this metadata
-  at runtime with `docker run`.
+- Dockerfiles can contain information such as exposed ports, shared volumes, and
+  other metadata. Packer builds a raw Docker container image that has none of
+  this metadata. You can pass in much of this metadata at runtime with
+  `docker run`, as sketched below.
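To make that last point concrete, here is a rough sketch of supplying such
metadata when the container is started rather than baking it into the image;
the port mapping, volume path, and image name below are placeholders, not
values taken from the documentation above:

``` {.text}
# Where a Dockerfile would declare EXPOSE and VOLUME directives, roughly
# equivalent settings can be passed to `docker run` each time a
# Packer-built image is started:
$ docker run -d \
    -p 8080:80 \
    -v /var/app/data:/data \
    registry.mydomain.com/mycontainer:latest
```

The `-p` and `-v` flags are standard `docker run` options; since the raw image
carries no metadata, such settings must be repeated on every run (or captured
in whatever supervisor or orchestration configuration starts the container).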
diff --git a/website/source/docs/builders/null.html.markdown b/website/source/docs/builders/null.html.markdown index 7398cadd7..037165ba2 100644 --- a/website/source/docs/builders/null.html.markdown +++ b/website/source/docs/builders/null.html.markdown @@ -1,24 +1,28 @@ --- -layout: "docs" -page_title: "Null Builder" -description: |- - The `null` Packer builder is not really a builder, it just sets up an SSH connection and runs the provisioners. It can be used to debug provisioners without incurring high wait times. It does not create any kind of image or artifact. ---- +description: | + The `null` Packer builder is not really a builder, it just sets up an SSH + connection and runs the provisioners. It can be used to debug provisioners + without incurring high wait times. It does not create any kind of image or + artifact. +layout: docs +page_title: Null Builder +... # Null Builder Type: `null` -The `null` Packer builder is not really a builder, it just sets up an SSH connection -and runs the provisioners. It can be used to debug provisioners without -incurring high wait times. It does not create any kind of image or artifact. +The `null` Packer builder is not really a builder, it just sets up an SSH +connection and runs the provisioners. It can be used to debug provisioners +without incurring high wait times. It does not create any kind of image or +artifact. ## Basic Example -Below is a fully functioning example. It doesn't do anything useful, since -no provisioners are defined, but it will connect to the specified host via ssh. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will connect to the specified host via ssh. -```javascript +``` {.javascript} { "type": "null", "ssh_host": "127.0.0.1", @@ -31,4 +35,3 @@ no provisioners are defined, but it will connect to the specified host via ssh. The null builder has no configuration parameters other than the [communicator](/docs/templates/communicator.html) settings. - diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index fec1a85a6..409275c7b 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -1,25 +1,30 @@ --- -layout: "docs" -page_title: "OpenStack Builder" -description: |- - The `openstack` Packer builder is able to create new images for use with OpenStack. The builder takes a source image, runs any provisioning necessary on the image after launching it, then creates a new reusable image. This reusable image can then be used as the foundation of new servers that are launched within OpenStack. The builder will create temporary keypairs that provide temporary access to the server while the image is being created. This simplifies configuration quite a bit. ---- +description: | + The `openstack` Packer builder is able to create new images for use with + OpenStack. The builder takes a source image, runs any provisioning necessary on + the image after launching it, then creates a new reusable image. This reusable + image can then be used as the foundation of new servers that are launched within + OpenStack. The builder will create temporary keypairs that provide temporary + access to the server while the image is being created. This simplifies + configuration quite a bit. +layout: docs +page_title: OpenStack Builder +... 
 
 # OpenStack Builder
 
 Type: `openstack`
 
 The `openstack` Packer builder is able to create new images for use with
-[OpenStack](http://www.openstack.org). The builder takes a source
-image, runs any provisioning necessary on the image after launching it,
-then creates a new reusable image. This reusable image can then be
-used as the foundation of new servers that are launched within OpenStack.
-The builder will create temporary keypairs that provide temporary access to
-the server while the image is being created. This simplifies configuration
-quite a bit.
+[OpenStack](http://www.openstack.org). The builder takes a source image, runs
+any provisioning necessary on the image after launching it, then creates a new
+reusable image. This reusable image can then be used as the foundation of new
+servers that are launched within OpenStack. The builder will create temporary
+keypairs that provide temporary access to the server while the image is being
+created. This simplifies configuration quite a bit.
 
-The builder does _not_ manage images. Once it creates an image, it is up to
-you to use it or delete it.
+The builder does *not* manage images. Once it creates an image, it is up to you
+to use it or delete it.
 
 ## Configuration Reference
 
@@ -28,81 +33,79 @@ segmented below into two categories: required and optional parameters. Within
 each category, the available configuration keys are alphabetized.
 
 In addition to the options listed here, a
-[communicator](/docs/templates/communicator.html)
-can be configured for this builder.
+[communicator](/docs/templates/communicator.html) can be configured for this
+builder.
 
 ### Required:
 
-* `flavor` (string) - The ID, name, or full URL for the desired flavor for the
+- `flavor` (string) - The ID, name, or full URL for the desired flavor for the
   server to be created.
 
-* `image_name` (string) - The name of the resulting image.
+- `image_name` (string) - The name of the resulting image.
 
-* `source_image` (string) - The ID or full URL to the base image to use.
-  This is the image that will be used to launch a new server and provision it.
-  Unless you specify completely custom SSH settings, the source image must
-  have `cloud-init` installed so that the keypair gets assigned properly.
+- `source_image` (string) - The ID or full URL to the base image to use. This is
+  the image that will be used to launch a new server and provision it. Unless
+  you specify completely custom SSH settings, the source image must have
+  `cloud-init` installed so that the keypair gets assigned properly.
 
-* `username` (string) - The username used to connect to the OpenStack service.
-  If not specified, Packer will use the environment variable
-  `OS_USERNAME`, if set.
+- `username` (string) - The username used to connect to the OpenStack service.
+  If not specified, Packer will use the environment variable `OS_USERNAME`,
+  if set.
 
-* `password` (string) - The password used to connect to the OpenStack service.
-  If not specified, Packer will use the environment variables
-  `OS_PASSWORD`, if set.
+- `password` (string) - The password used to connect to the OpenStack service.
+  If not specified, Packer will use the environment variable `OS_PASSWORD`,
+  if set.
 
 ### Optional:
 
-* `api_key` (string) - The API key used to access OpenStack. Some OpenStack
+- `api_key` (string) - The API key used to access OpenStack. Some OpenStack
   installations require this.
 
-* `availability_zone` (string) - The availability zone to launch the
-  server in. 
If this isn't specified, the default enforced by your OpenStack
-  cluster will be used. This may be required for some OpenStack clusters.
+- `availability_zone` (string) - The availability zone to launch the server in.
+  If this isn't specified, the default enforced by your OpenStack cluster will
+  be used. This may be required for some OpenStack clusters.
 
-* `floating_ip` (string) - A specific floating IP to assign to this instance.
+- `floating_ip` (string) - A specific floating IP to assign to this instance.
   `use_floating_ip` must also be set to true for this to have an effect.
 
-* `floating_ip_pool` (string) - The name of the floating IP pool to use
-  to allocate a floating IP. `use_floating_ip` must also be set to true
-  for this to have an affect.
+- `floating_ip_pool` (string) - The name of the floating IP pool to use to
+  allocate a floating IP. `use_floating_ip` must also be set to true for this to
+  have an effect.
 
-* `insecure` (boolean) - Whether or not the connection to OpenStack can be done
+- `insecure` (boolean) - Whether or not the connection to OpenStack can be done
   over an insecure connection. By default this is false.
 
-* `networks` (array of strings) - A list of networks by UUID to attach
-  to this instance.
+- `networks` (array of strings) - A list of networks by UUID to attach to
  this instance.
 
-* `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
-  instance into. Some OpenStack installations require this.
-  If not specified, Packer will use the environment variable
-  `OS_TENANT_NAME`, if set.
+- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
+  instance into. Some OpenStack installations require this. If not specified,
+  Packer will use the environment variable `OS_TENANT_NAME`, if set.
 
-* `security_groups` (array of strings) - A list of security groups by name
-  to add to this instance.
+- `security_groups` (array of strings) - A list of security groups by name to
+  add to this instance.
 
-* `region` (string) - The name of the region, such as "DFW", in which
-  to launch the server to create the AMI.
-  If not specified, Packer will use the environment variable
-  `OS_REGION_NAME`, if set.
+- `region` (string) - The name of the region, such as "DFW", in which to launch
+  the server to create the AMI. If not specified, Packer will use the
+  environment variable `OS_REGION_NAME`, if set.
 
-* `ssh_interface` (string) - The type of interface to connect via SSH. Values
-  useful for Rackspace are "public" or "private", and the default behavior is
-  to connect via whichever is returned first from the OpenStack API.
+- `ssh_interface` (string) - The type of interface to connect via SSH. Values
+  useful for Rackspace are "public" or "private", and the default behavior is to
+  connect via whichever is returned first from the OpenStack API.
 
-* `use_floating_ip` (boolean) - Whether or not to use a floating IP for
+- `use_floating_ip` (boolean) - Whether or not to use a floating IP for
   the instance. Defaults to false.
 
-* `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for
+- `rackconnect_wait` (boolean) - For Rackspace, whether or not to wait for
   Rackconnect to assign the machine an IP address before connecting via SSH.
   Defaults to false.
 
 ## Basic Example: Rackspace public cloud
 
-Here is a basic example. This is a working example to build a
-Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering.
+Here is a basic example. 
This is a working example to build an Ubuntu 12.04 LTS
+(Precise Pangolin) image on the Rackspace OpenStack cloud offering.
 
-```javascript
+``` {.javascript}
 {
   "type": "openstack",
   "username": "foo",
@@ -117,10 +120,10 @@ Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering.
 
 ## Basic Example: Private OpenStack cloud
 
-This example builds an Ubuntu 14.04 image on a private OpenStack cloud,
-powered by Metacloud.
+This example builds an Ubuntu 14.04 image on a private OpenStack cloud, powered
+by Metacloud.
 
-```javascript
+``` {.javascript}
 {
   "type": "openstack",
   "ssh_username": "root",
@@ -130,12 +133,12 @@ powered by Metacloud.
 }
 ```
 
-In this case, the connection information for connecting to OpenStack
-doesn't appear in the template. That is because I source a standard
-OpenStack script with environment variables set before I run this. This
-script is setting environment variables like:
+In this case, the connection information for connecting to OpenStack doesn't
+appear in the template. That is because I source a standard OpenStack script
+with environment variables set before I run this. This script is setting
+environment variables like:
 
-* `OS_AUTH_URL`
-* `OS_TENANT_ID`
-* `OS_USERNAME`
-* `OS_PASSWORD`
+- `OS_AUTH_URL`
+- `OS_TENANT_ID`
+- `OS_USERNAME`
+- `OS_PASSWORD`
diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown
index f0192b301..d89b5394f 100644
--- a/website/source/docs/builders/parallels-iso.html.markdown
+++ b/website/source/docs/builders/parallels-iso.html.markdown
@@ -1,31 +1,31 @@
 ---
-layout: "docs"
-page_title: "Parallels Builder (from an ISO)"
-description: |-
-  The Parallels Packer builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format, starting from an ISO image.
----
+description: |
+    The Parallels Packer builder is able to create Parallels Desktop for Mac virtual
+    machines and export them in the PVM format, starting from an ISO image.
+layout: docs
+page_title: 'Parallels Builder (from an ISO)'
+...
 
 # Parallels Builder (from an ISO)
 
 Type: `parallels-iso`
 
-The Parallels Packer builder is able to create
-[Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) virtual
-machines and export them in the PVM format, starting from an
-ISO image.
+The Parallels Packer builder is able to create [Parallels Desktop for
+Mac](http://www.parallels.com/products/desktop/) virtual machines and export
+them in the PVM format, starting from an ISO image.
 
-The builder builds a virtual machine by creating a new virtual machine
-from scratch, booting it, installing an OS, provisioning software within
-the OS, then shutting it down. The result of the Parallels builder is a directory
-containing all the files necessary to run the virtual machine portably.
+The builder builds a virtual machine by creating a new virtual machine from
+scratch, booting it, installing an OS, provisioning software within the OS, then
+shutting it down. The result of the Parallels builder is a directory containing
+all the files necessary to run the virtual machine portably.
 
 ## Basic Example
 
-Here is a basic example. This example is not functional. It will start the
-OS installer but then fail because we don't provide the preseed file for
-Ubuntu to self-install. Still, the example serves to show the basic configuration:
+Here is a basic example. This example is not functional. 
It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "parallels-iso", "guest_os_type": "ubuntu", @@ -40,219 +40,219 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the Parallels builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Parallels builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. -* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". This can be omitted only if `parallels_tools_mode` is "disable". ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. 
The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `guest_os_type` (string) - The guest OS type being installed. By default - this is "other", but you can get _dramatic_ performance improvements by - setting this to the proper value. To view all available values for this - run `prlctl create x --distribution list`. Setting the correct value hints to - Parallels Desktop how to optimize the virtual hardware to work best with - that operating system. +- `guest_os_type` (string) - The guest OS type being installed. By default this + is "other", but you can get *dramatic* performance improvements by setting + this to the proper value. To view all available values for this run + `prlctl create x --distribution list`. 
Setting the correct value hints to
+  Parallels Desktop how to optimize the virtual hardware to work best with that
+  operating system.
 
-* `hard_drive_interface` (string) - The type of controller that the
-  hard drives are attached to, defaults to "sata". Valid options are
-  "sata", "ide", and "scsi".
+- `hard_drive_interface` (string) - The type of controller that the hard drives
+  are attached to, defaults to "sata". Valid options are "sata", "ide",
+  and "scsi".
 
-* `host_interfaces` (array of strings) - A list of which interfaces on the
-  host should be searched for a IP address. The first IP address found on
-  one of these will be used as `{{ .HTTPIP }}` in the `boot_command`.
-  Defaults to ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8",
-  "en9", "ppp0", "ppp1", "ppp2"].
+- `host_interfaces` (array of strings) - A list of which interfaces on the host
+  should be searched for an IP address. The first IP address found on one of
+  these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to
+  \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
+  "ppp0", "ppp1", "ppp2"\].
 
-* `http_directory` (string) - Path to a directory to serve using an HTTP
-  server. The files in this directory will be available over HTTP that will
-  be requestable from the virtual machine. This is useful for hosting
-  kickstart files and so on. By default this is "", which means no HTTP
-  server will be started. The address and port of the HTTP server will be
-  available as variables in `boot_command`. This is covered in more detail
-  below.
+- `http_directory` (string) - Path to a directory to serve using an HTTP server.
+  The files in this directory will be available over HTTP and will be
+  requestable from the virtual machine. This is useful for hosting kickstart
+  files and so on. By default this is "", which means no HTTP server will
+  be started. The address and port of the HTTP server will be available as
+  variables in `boot_command`. This is covered in more detail below.
 
-* `http_port_min` and `http_port_max` (integer) - These are the minimum and
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
   maximum port to use for the HTTP server started to serve the `http_directory`.
   Because Packer often runs in parallel, Packer will choose a randomly available
   port in this range to run the HTTP server. If you want to force the HTTP
-  server to be on one port, make this minimum and maximum port the same.
-  By default the values are 8000 and 9000, respectively.
+  server to be on one port, make this minimum and maximum port the same. By
+  default the values are 8000 and 9000, respectively.
 
-* `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
-  Packer will try these in order. If anything goes wrong attempting to download
-  or while downloading a single URL, it will move on to the next. All URLs
-  must point to the same file (same checksum). By default this is empty
-  and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer
+  will try these in order. If anything goes wrong attempting to download or
+  while downloading a single URL, it will move on to the next. All URLs must
+  point to the same file (same checksum). By default this is empty and `iso_url`
+  is used. Only one of `iso_url` or `iso_urls` can be specified.
-* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `parallels_tools_guest_path` (string) - The path in the virtual machine to upload - Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". - This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which - should upload into the login directory of the user. +- `parallels_tools_guest_path` (string) - The path in the virtual machine to + upload Parallels Tools. This only takes effect if `parallels_tools_mode` + is "upload". This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`. + By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the + login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are made +- `parallels_tools_mode` (string) - The method by which Parallels Tools are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will be attached as a CD device to the virtual machine. If the mode is "upload" the Parallels Tools ISO will be uploaded to the path specified by `parallels_tools_guest_path`. The default value is "upload". -* `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the order - defined in the template. For each command, the command is defined itself as an - array of strings, where each string represents a single argument on the + this is an array of commands to execute. The commands are executed in the + order defined in the template. For each command, the command is defined itself + as an array of strings, where each string represents a single argument on the command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `prlctl` are below. + where the `Name` variable is replaced with the VM name. More details on how to + use `prlctl` are below. -* `prlctl_post` (array of array of strings) - Identical to `prlctl`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that + it is run after the virtual machine is shutdown, and before the virtual + machine is exported. 
 
-* `prlctl_version_file` (string) - The path within the virtual machine to upload
+- `prlctl_version_file` (string) - The path within the virtual machine to upload
   a file that contains the `prlctl` version that was used to create the machine.
   This information can be useful for provisioning. By default this is
-  ".prlctl_version", which will generally upload it into the home directory.
+  ".prlctl\_version", which will generally upload it into the home directory.
 
-* `shutdown_command` (string) - The command to use to gracefully shut down
-  the machine once all the provisioning is done. By default this is an empty
-  string, which tells Packer to just forcefully shut down the machine.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+  machine once all the provisioning is done. By default this is an empty string,
+  which tells Packer to just forcefully shut down the machine.
 
-* `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down.
-  If it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+  `shutdown_command` for the virtual machine to actually shut down. If it
+  doesn't shut down in this time, it is an error. By default, the timeout is
+  "5m", or five minutes.
 
-* `vm_name` (string) - This is the name of the PVM directory for the new
-  virtual machine, without the file extension. By default this is
-  "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+- `vm_name` (string) - This is the name of the PVM directory for the new virtual
+  machine, without the file extension. By default this is "packer-BUILDNAME",
+  where "BUILDNAME" is the name of the build.
 
 ## Boot Command
 
-The `boot_command` configuration is very important: it specifies the keys
-to type when the virtual machine is first booted in order to start the
-OS installer. This command is typed after `boot_wait`, which gives the
-virtual machine some time to actually load the ISO.
+The `boot_command` configuration is very important: it specifies the keys to
+type when the virtual machine is first booted in order to start the OS
+installer. This command is typed after `boot_wait`, which gives the virtual
+machine some time to actually load the ISO.
 
-As documented above, the `boot_command` is an array of strings. The
-strings are all typed in sequence. It is an array only to improve readability
-within the template.
+As documented above, the `boot_command` is an array of strings. The strings are
+all typed in sequence. It is an array only to improve readability within the
+template.
 
 The boot command is "typed" character for character (using the Parallels
 Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html))
-simulating a human actually typing the keyboard. There are a set of special
-keys available. If these are in your boot command, they will be replaced by
-the proper key:
+simulating a human actually typing the keyboard. There are a set of special keys
+available. If these are in your boot command, they will be replaced by the
+proper key:
 
-* `<bs>` - Backspace
+- `<bs>` - Backspace
 
-* `<del>` - Delete
+- `<del>` - Delete
 
-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
 
-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
 
-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
 
-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
 
-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
 
-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
 
-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
 
-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
 
-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
 
-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you have to generally wait for the UI
+  to update before typing more.
 
 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:
 
-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-  that is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will
-  be blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
+  is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!
 
-Example boot command. This is actually a working boot command used to start
-an Ubuntu 12.04 installer:
+Example boot command. This is actually a working boot command used to start an
+Ubuntu 12.04 installer:
 
-```text
+``` {.text}
 [
   "",
   "/install/vmlinuz noapic ",
@@ -267,17 +267,18 @@ an Ubuntu 12.04 installer:
 ```
 
 ## prlctl Commands
+
 In order to perform extra customization of the virtual machine, a template can
 define extra calls to `prlctl` to perform.
 [prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf)
 is the command-line interface to Parallels Desktop. It can be used to configure
 the virtual machine, such as set RAM, CPUs, etc.
 
-Extra `prlctl` commands are defined in the template in the `prlctl` section.
-An example is shown below that sets the memory and number of CPUs within the
+Extra `prlctl` commands are defined in the template in the `prlctl` section. An
+example is shown below that sets the memory and number of CPUs within the
 virtual machine:
 
-```javascript
+``` {.javascript}
 {
   "prlctl": [
     ["set", "{{.Name}}", "--memsize", "1024"],
@@ -291,7 +292,7 @@ executed in the order defined. So in the above example, the memory will be set
 followed by the CPUs.
 
 Each command itself is an array of strings, where each string is an argument to
-`prlctl`. Each argument is treated as a
-[configuration template](/docs/templates/configuration-templates.html). The only
-available variable is `Name` which is replaced with the unique name of the VM,
-which is required for many `prlctl` calls.
+`prlctl`. Each argument is treated as a [configuration
+template](/docs/templates/configuration-templates.html). The only available
+variable is `Name` which is replaced with the unique name of the VM, which is
+required for many `prlctl` calls.
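The `prlctl_post` option documented above takes the same argument format as
`prlctl`, so the two can be paired. A minimal sketch (the memory sizes here are
illustrative only, not values taken from the documentation above) might raise
the VM's memory for the build and restore it after shutdown, before the machine
is exported:

``` {.javascript}
{
  "prlctl": [
    ["set", "{{.Name}}", "--memsize", "2048"]
  ],
  "prlctl_post": [
    ["set", "{{.Name}}", "--memsize", "1024"]
  ]
}
```

Both sections use the same `Name` template variable; only the point in the
build at which the commands run differs.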
diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown
index 4083a57fd..f4f9f352c 100644
--- a/website/source/docs/builders/parallels-pvm.html.markdown
+++ b/website/source/docs/builders/parallels-pvm.html.markdown
@@ -1,30 +1,31 @@
 ---
-layout: "docs"
-page_title: "Parallels Builder (from a PVM)"
-description: |-
-  This Parallels builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format, starting from an existing PVM (exported virtual machine image).
----
+description: |
+    This Parallels builder is able to create Parallels Desktop for Mac virtual
+    machines and export them in the PVM format, starting from an existing PVM
+    (exported virtual machine image).
+layout: docs
+page_title: 'Parallels Builder (from a PVM)'
+...
 
 # Parallels Builder (from a PVM)
 
 Type: `parallels-pvm`
 
-This Parallels builder is able to create
-[Parallels Desktop for Mac](http://www.parallels.com/products/desktop/)
-virtual machines and export them in the PVM format, starting from an
-existing PVM (exported virtual machine image).
+This Parallels builder is able to create [Parallels Desktop for
+Mac](http://www.parallels.com/products/desktop/) virtual machines and export
+them in the PVM format, starting from an existing PVM (exported virtual machine
+image).
 
-The builder builds a virtual machine by importing an existing PVM
-file. It then boots this image, runs provisioners on this new VM, and
-exports that VM to create the image. The imported machine is deleted prior
-to finishing the build.
+The builder builds a virtual machine by importing an existing PVM file. It then
+boots this image, runs provisioners on this new VM, and exports that VM to
+create the image. The imported machine is deleted prior to finishing the build.
 
 ## Basic Example
 
 Here is a basic example. This example is functional if you have a PVM matching
 the settings here.
 
-```javascript
+``` {.javascript}
 {
   "type": "parallels-pvm",
   "parallels_tools_flavor": "lin",
@@ -36,175 +37,180 @@ the settings here.
 }
 ```
 
-It is important to add a `shutdown_command`. By default Packer halts the
-virtual machine and the file system may not be sync'd. Thus, changes made in a
+It is important to add a `shutdown_command`. By default Packer halts the virtual
+machine and the file system may not be sync'd. Thus, changes made in a
 provisioner might not be saved.
 
 ## Configuration Reference
 
-There are many configuration options available for the Parallels builder.
-They are organized below into two categories: required and optional. Within
-each category, the available options are alphabetized and described.
+There are many configuration options available for the Parallels builder. They
+are organized below into two categories: required and optional. Within each
+category, the available options are alphabetized and described.
 
 In addition to the options listed here, a
-[communicator](/docs/templates/communicator.html)
-can be configured for this builder.
+[communicator](/docs/templates/communicator.html) can be configured for this
+builder.
 
 ### Required:
 
-* `source_path` (string) - The path to a PVM directory that acts as
-  the source of this build.
+- `source_path` (string) - The path to a PVM directory that acts as the source
+  of this build.
 
-* `ssh_username` (string) - The username to use to SSH into the machine
-  once the OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once the
+  OS is installed. 
 
-* `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
+- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
   install into the VM. Valid values are "win", "lin", "mac", "os2" and "other".
   This can be omitted only if `parallels_tools_mode` is "disable".
 
 ### Optional:
 
-* `boot_command` (array of strings) - This is an array of commands to type
-  when the virtual machine is first booted. The goal of these commands should
-  be to type just enough to initialize the operating system installer. Special
-  keys can be typed as well, and are covered in the section below on the boot
-  command. If this is not specified, it is assumed the installer will start
-  itself.
+- `boot_command` (array of strings) - This is an array of commands to type when
+  the virtual machine is first booted. The goal of these commands should be to
+  type just enough to initialize the operating system installer. Special keys
+  can be typed as well, and are covered in the section below on the
+  boot command. If this is not specified, it is assumed the installer will
+  start itself.
 
-* `boot_wait` (string) - The time to wait after booting the initial virtual
+- `boot_wait` (string) - The time to wait after booting the initial virtual
   machine before typing the `boot_command`. The value of this should be
-  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
-  five seconds and one minute 30 seconds, respectively. If this isn't specified,
-  the default is 10 seconds.
+  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
+  seconds and one minute 30 seconds, respectively. If this isn't specified, the
+  default is 10 seconds.
 
-* `floppy_files` (array of strings) - A list of files to put onto a floppy
-  disk that is attached when the VM is booted for the first time. This is
-  most useful for unattended Windows installs, which look for an
-  `Autounattend.xml` file on removable media. By default no floppy will
-  be attached. The files listed in this configuration will all be put
-  into the root directory of the floppy disk; sub-directories are not supported.
+- `floppy_files` (array of strings) - A list of files to put onto a floppy disk
+  that is attached when the VM is booted for the first time. This is most useful
+  for unattended Windows installs, which look for an `Autounattend.xml` file on
+  removable media. By default no floppy will be attached. The files listed in
+  this configuration will all be put into the root directory of the floppy disk;
+  sub-directories are not supported.
 
-* `reassign_mac` (boolean) - If this is "false" the MAC address of the first
-  NIC will reused when imported else a new MAC address will be generated by
-  Parallels. Defaults to "false".
+- `reassign_mac` (boolean) - If this is "false" the MAC address of the first NIC
+  will be reused when imported; otherwise a new MAC address will be generated
+  by Parallels. Defaults to "false".
 
-* `output_directory` (string) - This is the path to the directory where the
+- `output_directory` (string) - This is the path to the directory where the
   resulting virtual machine will be created. This may be relative or absolute.
   If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running the builder.
-  By default this is "output-BUILDNAME" where "BUILDNAME" is the name
-  of the build.
+  is executed. This directory must not exist or be empty prior to running
+  the builder. 
By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `parallels_tools_guest_path` (string) - The path in the VM to upload +- `parallels_tools_guest_path` (string) - The path in the VM to upload Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload". - This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso" which - should upload into the login directory of the user. + This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`. + By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the + login directory of the user. -* `parallels_tools_mode` (string) - The method by which Parallels Tools are made +- `parallels_tools_mode` (string) - The method by which Parallels Tools are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will be attached as a CD device to the virtual machine. If the mode is "upload" the Parallels Tools ISO will be uploaded to the path specified by `parallels_tools_guest_path`. The default value is "upload". -* `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in +- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in order to further customize the virtual machine being created. The value of - this is an array of commands to execute. The commands are executed in the order - defined in the template. For each command, the command is defined itself as an - array of strings, where each string represents a single argument on the + this is an array of commands to execute. The commands are executed in the + order defined in the template. For each command, the command is defined itself + as an array of strings, where each string represents a single argument on the command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), - where the `Name` variable is replaced with the VM name. More details on how - to use `prlctl` are below. + where the `Name` variable is replaced with the VM name. More details on how to + use `prlctl` are below. -* `prlctl_post` (array of array of strings) - Identical to `prlctl`, - except that it is run after the virtual machine is shutdown, and before the - virtual machine is exported. +- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that + it is run after the virtual machine is shutdown, and before the virtual + machine is exported. -* `prlctl_version_file` (string) - The path within the virtual machine to upload +- `prlctl_version_file` (string) - The path within the virtual machine to upload a file that contains the `prlctl` version that was used to create the machine. This information can be useful for provisioning. By default this is - ".prlctl_version", which will generally upload it into the home directory. + ".prlctl\_version", which will generally upload it into the home directory. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. 
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+  machine once all the provisioning is done. By default this is an empty string,
+  which tells Packer to just forcefully shut down the machine.
 
-* `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down.
-  If it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+  `shutdown_command` for the virtual machine to actually shut down. If it
+  doesn't shut down in this time, it is an error. By default, the timeout is
+  "5m", or five minutes.
 
-* `vm_name` (string) - This is the name of the virtual machine when it is
-  imported as well as the name of the PVM directory when the virtual machine is
-  exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is
-  the name of the build.
+- `vm_name` (string) - This is the name of the virtual machine when it is
+  imported as well as the name of the PVM directory when the virtual machine
+  is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
+  name of the build.
 
 ## Parallels Tools
+
 After the virtual machine is up and the operating system is installed, Packer
 uploads the Parallels Tools into the virtual machine. The path where they are
 uploaded is controllable by `parallels_tools_path`, and defaults to
 "prl-tools.iso". Without an absolute path, it is uploaded to the home directory
-of the SSH user. Parallels Tools ISO's can be found in:
-"/Applications/Parallels Desktop.app/Contents/Resources/Tools/"
+of the SSH user. Parallels Tools ISO's can be found in: "/Applications/Parallels
+Desktop.app/Contents/Resources/Tools/"
 
 ## Boot Command
 
-The `boot_command` specifies the keys to type when the virtual machine is first booted. This command is typed after `boot_wait`.
+The `boot_command` specifies the keys to type when the virtual machine is first
+booted. This command is typed after `boot_wait`.
 
-As documented above, the `boot_command` is an array of strings. The
-strings are all typed in sequence. It is an array only to improve readability
-within the template.
+As documented above, the `boot_command` is an array of strings. The strings are
+all typed in sequence. It is an array only to improve readability within the
+template.
 
 The boot command is "typed" character for character (using the Parallels
 Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html))
-simulating a human actually typing the keyboard. There are a set of special
-keys available. If these are in your boot command, they will be replaced by
-the proper key:
+simulating a human actually typing the keyboard. There are a set of special keys
+available. If these are in your boot command, they will be replaced by the
+proper key:
 
-* `<bs>` - Backspace
+- `<bs>` - Backspace
 
-* `<del>` - Delete
+- `<del>` - Delete
 
-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
 
-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
 
-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
 
-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
 
-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
 
-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
 
-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
 
-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
 
-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
 
-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you have to generally wait for the UI
+  to update before typing more.
 
 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:
 
 ## prlctl Commands
+
 In order to perform extra customization of the virtual machine, a template can
 define extra calls to `prlctl` to perform.
 [prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf)
 is the command-line interface to Parallels Desktop. It can be used to configure
 the virtual machine, such as set RAM, CPUs, etc.
 
-Extra `prlctl` commands are defined in the template in the `prlctl` section.
-An example is shown below that sets the memory and number of CPUs within the
+Extra `prlctl` commands are defined in the template in the `prlctl` section. An
+example is shown below that sets the memory and number of CPUs within the
 virtual machine:
 
-```javascript
+``` {.javascript}
 {
   "prlctl": [
     ["set", "{{.Name}}", "--memsize", "1024"],
@@ -218,7 +224,7 @@ executed in the order defined. So in the above example, the memory will be set
 followed by the CPUs.
 
 Each command itself is an array of strings, where each string is an argument to
-`prlctl`. Each argument is treated as a
-[configuration template](/docs/templates/configuration-templates.html). The only
-available variable is `Name` which is replaced with the unique name of the VM,
-which is required for many `prlctl` calls.
+`prlctl`. Each argument is treated as a [configuration
+template](/docs/templates/configuration-templates.html). The only available
+variable is `Name` which is replaced with the unique name of the VM, which is
+required for many `prlctl` calls.
diff --git a/website/source/docs/builders/parallels.html.markdown b/website/source/docs/builders/parallels.html.markdown
index db5f62139..7d355eaef 100644
--- a/website/source/docs/builders/parallels.html.markdown
+++ b/website/source/docs/builders/parallels.html.markdown
@@ -1,34 +1,37 @@
 ---
-layout: "docs"
-page_title: "Parallels Builder"
-description: |-
-  The Parallels Packer builder is able to create Parallels Desktop for Mac virtual machines and export them in the PVM format.
----
+description: |
+    The Parallels Packer builder is able to create Parallels Desktop for Mac virtual
+    machines and export them in the PVM format.
+layout: docs
+page_title: Parallels Builder
+...
 
 # Parallels Builder
 
-The Parallels Packer builder is able to create [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) virtual machines and export them in the PVM format.
+The Parallels Packer builder is able to create [Parallels Desktop for
+Mac](http://www.parallels.com/products/desktop/) virtual machines and export
+them in the PVM format. 
-Packer actually comes with multiple builders able to create Parallels -machines, depending on the strategy you want to use to build the image. -Packer supports the following Parallels builders: +Packer actually comes with multiple builders able to create Parallels machines, +depending on the strategy you want to use to build the image. Packer supports +the following Parallels builders: -* [parallels-iso](/docs/builders/parallels-iso.html) - Starts from - an ISO file, creates a brand new Parallels VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. - -* [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder - imports an existing PVM file, runs provisioners on top of that VM, - and exports that machine to create an image. This is best if you have - an existing Parallels VM export you want to use as the source. As an - additional benefit, you can feed the artifact of this builder back into - itself to iterate on a machine. +- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO file, + creates a brand new Parallels VM, installs an OS, provisions software within + the OS, then exports that machine to create an image. This is best for people + who want to start from scratch. +- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an + existing PVM file, runs provisioners on top of that VM, and exports that + machine to create an image. This is best if you have an existing Parallels VM + export you want to use as the source. As an additional benefit, you can feed + the artifact of this builder back into itself to iterate on a machine. ## Requirements -In addition to [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) this requires the -[Parallels Virtualization SDK](http://www.parallels.com/downloads/desktop/). +In addition to [Parallels Desktop for +Mac](http://www.parallels.com/products/desktop/) this requires the [Parallels +Virtualization SDK](http://www.parallels.com/downloads/desktop/). -The SDK can be installed by downloading and following the instructions in the dmg. +The SDK can be installed by downloading and following the instructions in the +dmg. diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index ce39c53ec..57c53e4c0 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -1,30 +1,31 @@ --- -layout: "docs" -page_title: "QEMU Builder" -description: |- - The Qemu Packer builder is able to create KVM and Xen virtual machine images. Support for Xen is experimental at this time. ---- +description: | + The Qemu Packer builder is able to create KVM and Xen virtual machine images. + Support for Xen is experimental at this time. +layout: docs +page_title: QEMU Builder +... # QEMU Builder Type: `qemu` -The Qemu Packer builder is able to create [KVM](http://www.linux-kvm.org) -and [Xen](http://www.xenproject.org) virtual machine images. Support -for Xen is experimental at this time. +The Qemu Packer builder is able to create [KVM](http://www.linux-kvm.org) and +[Xen](http://www.xenproject.org) virtual machine images. Support for Xen is +experimental at this time. 
-The builder builds a virtual machine by creating a new virtual machine
-from scratch, booting it, installing an OS, rebooting the machine with the
-boot media as the virtual hard drive, provisioning software within
-the OS, then shutting it down. The result of the Qemu builder is a directory
-containing the image file necessary to run the virtual machine on KVM or Xen.
+The builder builds a virtual machine by creating a new virtual machine from
+scratch, booting it, installing an OS, rebooting the machine with the boot media
+as the virtual hard drive, provisioning software within the OS, then shutting it
+down. The result of the Qemu builder is a directory containing the image file
+necessary to run the virtual machine on KVM or Xen.

 ## Basic Example

-Here is a basic example. This example is functional so long as you fixup
-paths to files, URLS for ISOs and checksums.
+Here is a basic example. This example is functional so long as you fix up paths
+to files, URLs for ISOs, and checksums.

-```javascript
+``` {.javascript}
 {
   "builders": [
@@ -62,153 +63,153 @@ paths to files, URLS for ISOs and checksums.
 }
 ```

-A working CentOS 6.x kickstart file can be found
-[at this URL](https://gist.github.com/mitchellh/7328271/#file-centos6-ks-cfg), adapted from an unknown source.
-Place this file in the http directory with the proper name. For the
-example above, it should go into "httpdir" with a name of "centos6-ks.cfg".
+A working CentOS 6.x kickstart file can be found [at this
+URL](https://gist.github.com/mitchellh/7328271/#file-centos6-ks-cfg), adapted
+from an unknown source. Place this file in the http directory with the proper
+name. For the example above, it should go into "httpdir" with a name of
+"centos6-ks.cfg".

 ## Configuration Reference

-There are many configuration options available for the Qemu builder.
-They are organized below into two categories: required and optional. Within
-each category, the available options are alphabetized and described.
+There are many configuration options available for the Qemu builder. They are
+organized below into two categories: required and optional. Within each
+category, the available options are alphabetized and described.

 In addition to the options listed here, a
-[communicator](/docs/templates/communicator.html)
-can be configured for this builder.
+[communicator](/docs/templates/communicator.html) can be configured for this
+builder.

 ### Required:

-* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO
-  files are so large, this is required and Packer will verify it prior
-  to booting a virtual machine with the ISO attached. The type of the
-  checksum is specified with `iso_checksum_type`, documented below.
+- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files
+  are so large, this is required and Packer will verify it prior to booting a
+  virtual machine with the ISO attached. The type of the checksum is specified
+  with `iso_checksum_type`, documented below.

-* `iso_checksum_type` (string) - The type of the checksum specified in
-  `iso_checksum`. Valid values are "md5", "sha1", "sha256", or "sha512" currently.
+- `iso_checksum_type` (string) - The type of the checksum specified in
+  `iso_checksum`. Valid values are "md5", "sha1", "sha256", or
+  "sha512" currently.

-* `iso_url` (string) - A URL to the ISO containing the installation image.
-  This URL can be either an HTTP URL or a file URL (or path to a file).
-  If this is an HTTP URL, Packer will download it and cache it between
-  runs.
+- `iso_url` (string) - A URL to the ISO containing the installation image. This
+  URL can be either an HTTP URL or a file URL (or path to a file). If this is an
+  HTTP URL, Packer will download it and cache it between runs.

-* `ssh_username` (string) - The username to use to SSH into the machine
-  once the OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once the
+  OS is installed.

 ### Optional:

-* `accelerator` (string) - The accelerator type to use when running the VM.
-  This may have a value of either "none", "kvm", "tcg", or "xen" and you must have that
-  support in on the machine on which you run the builder. By default "kvm"
+- `accelerator` (string) - The accelerator type to use when running the VM. This
+  may have a value of either "none", "kvm", "tcg", or "xen" and you must have
+  that support on the machine on which you run the builder. By default "kvm"
   is used.

-* `boot_command` (array of strings) - This is an array of commands to type
-  when the virtual machine is first booted. The goal of these commands should
-  be to type just enough to initialize the operating system installer. Special
-  keys can be typed as well, and are covered in the section below on the boot
-  command. If this is not specified, it is assumed the installer will start
-  itself.
+- `boot_command` (array of strings) - This is an array of commands to type when
+  the virtual machine is first booted. The goal of these commands should be to
+  type just enough to initialize the operating system installer. Special keys
+  can be typed as well, and are covered in the section below on the
+  boot command. If this is not specified, it is assumed the installer will
+  start itself.

-* `boot_wait` (string) - The time to wait after booting the initial virtual
+- `boot_wait` (string) - The time to wait after booting the initial virtual
   machine before typing the `boot_command`. The value of this should be
-  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
-  five seconds and one minute 30 seconds, respectively. If this isn't specified,
-  the default is 10 seconds.
+  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
+  seconds and one minute 30 seconds, respectively. If this isn't specified, the
+  default is 10 seconds.

-* `disk_cache` (string) - The cache mode to use for disk. Allowed values
-  include any of "writethrough", "writeback", "none", "unsafe" or
-  "directsync". By default, this is set to "writeback".
+- `disk_cache` (string) - The cache mode to use for disk. Allowed values include
+  any of "writethrough", "writeback", "none", "unsafe" or "directsync". By
+  default, this is set to "writeback".

-* `disk_discard` (string) - The discard mode to use for disk. Allowed values
-  include any of "unmap" or "ignore". By default, this is set to "ignore".
+- `disk_discard` (string) - The discard mode to use for disk. Allowed values
+  include any of "unmap" or "ignore". By default, this is set to "ignore".

-* `disk_image` (boolean) - Packer defaults to building from an ISO file,
-  this parameter controls whether the ISO URL supplied is actually a bootable
-  QEMU image. When this value is set to true, the machine will clone the
-  source, resize it according to `disk_size` and boot the image.
+- `disk_image` (boolean) - Packer defaults to building from an ISO file, this
+  parameter controls whether the ISO URL supplied is actually a bootable
+  QEMU image.
When this value is set to true, the machine will clone the source, + resize it according to `disk_size` and boot the image. -* `disk_interface` (string) - The interface to use for the disk. Allowed - values include any of "ide," "scsi" or "virtio." Note also that any boot - commands or kickstart type scripts must have proper adjustments for - resulting device names. The Qemu builder uses "virtio" by default. +- `disk_interface` (string) - The interface to use for the disk. Allowed values + include any of "ide," "scsi" or "virtio." Note also that any boot commands or + kickstart type scripts must have proper adjustments for resulting + device names. The Qemu builder uses "virtio" by default. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "qcow2" or "raw", this specifies the output - format of the virtual machine image. This defaults to "qcow2". +- `format` (string) - Either "qcow2" or "raw", this specifies the output format + of the virtual machine image. This defaults to "qcow2". -* `headless` (boolean) - Packer defaults to building QEMU virtual machines by - launching a GUI that shows the console of the machine being built. - When this value is set to true, the machine will start without a console. +- `headless` (boolean) - Packer defaults to building QEMU virtual machines by + launching a GUI that shows the console of the machine being built. When this + value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. 
+ The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `machine_type` (string) - The type of machine emulation to use. Run - your qemu binary with the flags `-machine help` to list available types - for your system. This defaults to "pc". +- `machine_type` (string) - The type of machine emulation to use. Run your qemu + binary with the flags `-machine help` to list available types for your system. + This defaults to "pc". -* `net_device` (string) - The driver to use for the network interface. Allowed - values "ne2k_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," +- `net_device` (string) - The driver to use for the network interface. Allowed + values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000," "pcnet" or "virtio." The Qemu builder uses "virtio" by default. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `qemu_binary` (string) - The name of the Qemu binary to look for. This - defaults to "qemu-system-x86_64", but may need to be changed for some - platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better +- `qemu_binary` (string) - The name of the Qemu binary to look for. This + defaults to "qemu-system-x86\_64", but may need to be changed for + some platforms. 
For example "qemu-kvm", or "qemu-system-i386" may be a better choice for some systems. -* `qemuargs` (array of array of strings) - Allows complete control over - the qemu command line (though not, at this time, qemu-img). Each array - of strings makes up a command line switch that overrides matching default - switch/value pairs. Any value specified as an empty string is ignored. - All values after the switch are concatenated with no separator. +- `qemuargs` (array of array of strings) - Allows complete control over the qemu + command line (though not, at this time, qemu-img). Each array of strings makes + up a command line switch that overrides matching default switch/value pairs. + Any value specified as an empty string is ignored. All values after the switch + are concatenated with no separator. -~> **Warning:** The qemu command line allows extreme flexibility, so beware of -conflicting arguments causing failures of your run. For instance, using +\~> **Warning:** The qemu command line allows extreme flexibility, so beware +of conflicting arguments causing failures of your run. For instance, using --no-acpi could break the ability to send power signal type commands (e.g., -shutdown -P now) to the virtual machine, thus preventing proper shutdown. To -see the defaults, look in the packer.log file and search for the -qemu-system-x86 command. The arguments are all printed for review. +shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see +the defaults, look in the packer.log file and search for the qemu-system-x86 +command. The arguments are all printed for review. The following shows a sample usage: -```javascript +``` {.javascript} // ... "qemuargs": [ [ "-m", "1024M" ], @@ -227,88 +228,87 @@ qemu-system-x86 command. The arguments are all printed for review. would produce the following (not including other defaults supplied by the builder and not otherwise conflicting with the qemuargs):
    -	qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0"
+  qemu-system-x86 -m 1024M --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0
     
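To see those defaults in practice, one minimal sketch is to run a build with
Packer's verbose logging enabled and search the resulting log for the generated
command line. The template filename here is only a placeholder:

``` {.text}
$ PACKER_LOG=1 PACKER_LOG_PATH=packer.log packer build template.json
$ grep qemu-system-x86 packer.log
```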
    +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. - -* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and - maximum port to use for the SSH port on the host machine which is forwarded - to the SSH port on the guest machine. Because Packer often runs in parallel, +- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and + maximum port to use for the SSH port on the host machine which is forwarded to + the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port. -* `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for - the new virtual machine, without the file extension. By default this is +- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the + new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. -* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and - maximum port to use for the VNC port on the host machine which is forwarded - to the VNC port on the guest machine. Because Packer often runs in parallel, - Packer will choose a randomly available port in this range to use as the - host port. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to + use for the VNC port on the host machine which is forwarded to the VNC port on + the guest machine. Because Packer often runs in parallel, Packer will choose a + randomly available port in this range to use as the host port. ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings are +all typed in sequence. It is an array only to improve readability within the +template. -The boot command is "typed" character for character over a VNC connection -to the machine, simulating a human actually typing the keyboard. 
There are
-a set of special keys available. If these are in your boot command, they
-will be replaced by the proper key:
+The boot command is "typed" character for character over a VNC connection to the
+machine, simulating a human actually typing the keyboard. There is a set of
+special keys available. If these are in your boot command, they will be replaced
+by the proper key:

-* `<bs>` - Backspace
+- `<bs>` - Backspace

-* `<del>` - Delete
+- `<del>` - Delete

-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you generally have to wait for the UI
+  to update before typing more.

 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:

-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-  that is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will
-  be blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
+  that is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!

-Example boot command. This is actually a working boot command used to start
-an CentOS 6.4 installer:
+Example boot command. This is actually a working boot command used to start a
+CentOS 6.4 installer:

-```javascript
+``` {.javascript}
 "boot_command": [
     "",
diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown
index 97ba056f8..bdccdf768 100644
--- a/website/source/docs/builders/virtualbox-iso.html.markdown
+++ b/website/source/docs/builders/virtualbox-iso.html.markdown
@@ -1,30 +1,31 @@
 ---
-layout: "docs"
-page_title: "VirtualBox Builder (from an ISO)"
-description: |-
-  The VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVF format, starting from an ISO image.
----
+description: |
+    The VirtualBox Packer builder is able to create VirtualBox virtual machines and
+    export them in the OVF format, starting from an ISO image.
+layout: docs
+page_title: 'VirtualBox Builder (from an ISO)'
+...
# VirtualBox Builder (from an ISO) Type: `virtualbox-iso` -The VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) -virtual machines and export them in the OVF format, starting from an -ISO image. +The VirtualBox Packer builder is able to create +[VirtualBox](https://www.virtualbox.org/) virtual machines and export them in +the OVF format, starting from an ISO image. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. The result of the VirtualBox builder is a directory -containing all the files necessary to run the virtual machine portably. +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, then +shutting it down. The result of the VirtualBox builder is a directory containing +all the files necessary to run the virtual machine portably. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "virtualbox-iso", "guest_os_type": "Ubuntu_64", @@ -37,250 +38,249 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the VirtualBox builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VirtualBox builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. 
While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size, in megabytes, of the hard disk to create - for the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for + the VM. By default, this is 40000 (about 40 GB). -* `export_opts` (array of strings) - Additional options to pass to the `VBoxManage export`. - This can be useful for passing product information to include in the resulting - appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. 
+- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "ovf" or "ova", this specifies the output - format of the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format of + the exported virtual machine. This defaults to "ovf". -* `guest_additions_mode` (string) - The method by which guest additions - are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". If the mode is "attach" the guest - additions ISO will be attached as a CD device to the virtual machine. - If the mode is "upload" the guest additions ISO will be uploaded to - the path specified by `guest_additions_path`. The default value is - "upload". If "disable" is used, guest additions won't be downloaded, - either. +- `guest_additions_mode` (string) - The method by which guest additions are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the guest additions ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the guest + additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -* `guest_additions_path` (string) - The path on the guest virtual machine - where the VirtualBox guest additions ISO will be uploaded. By default this - is "VBoxGuestAdditions.iso" which should upload into the login directory - of the user. This is a [configuration template](/docs/templates/configuration-templates.html) - where the `Version` variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine where + the VirtualBox guest additions ISO will be uploaded. By default this is + "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -* `guest_additions_sha256` (string) - The SHA256 checksum of the guest - additions ISO that will be uploaded to the guest VM. By default the - checksums will be downloaded from the VirtualBox website, so this only - needs to be set if you want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions + ISO that will be uploaded to the guest VM. By default the checksums will be + downloaded from the VirtualBox website, so this only needs to be set if you + want to be explicit about the checksum. -* `guest_additions_url` (string) - The URL to the guest additions ISO - to upload. This can also be a file URL if the ISO is at a local path. - By default, the VirtualBox builder will attempt to find the guest additions - ISO on the local file system. 
If it is not available locally, the builder - will download the proper guest additions ISO from the internet. +- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. + This can also be a file URL if the ISO is at a local path. By default, the + VirtualBox builder will attempt to find the guest additions ISO on the local + file system. If it is not available locally, the builder will download the + proper guest additions ISO from the internet. -* `guest_os_type` (string) - The guest OS type being installed. By default - this is "other", but you can get _dramatic_ performance improvements by - setting this to the proper value. To view all available values for this - run `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox - how to optimize the virtual hardware to work best with that operating - system. +- `guest_os_type` (string) - The guest OS type being installed. By default this + is "other", but you can get *dramatic* performance improvements by setting + this to the proper value. To view all available values for this run + `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how + to optimize the virtual hardware to work best with that operating system. -* `hard_drive_interface` (string) - The type of controller that the primary - hard drive is attached to, defaults to "ide". When set to "sata", the - drive is attached to an AHCI SATA controller. When set to "scsi", the drive - is attached to an LsiLogic SCSI controller. +- `hard_drive_interface` (string) - The type of controller that the primary hard + drive is attached to, defaults to "ide". When set to "sata", the drive is + attached to an AHCI SATA controller. When set to "scsi", the drive is attached + to an LsiLogic SCSI controller. -* `headless` (boolean) - Packer defaults to building VirtualBox - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. 
If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_interface` (string) - The type of controller that the ISO is attached - to, defaults to "ide". When set to "sata", the drive is attached to an - AHCI SATA controller. +- `iso_interface` (string) - The type of controller that the ISO is attached to, + defaults to "ide". When set to "sata", the drive is attached to an AHCI + SATA controller. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all - the provisioning is done. By default this is an empty string, which tells Packer to just - forcefully shut down the machine unless a shutdown command takes place inside script so this may - safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine unless a shutdown + command takes place inside script so this may safely be omitted. If one or + more scripts require a reboot it is suggested to leave this blank since + reboots may fail and specify the final shutdown command in your last script. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. 
-* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
-  maximum port to use for the SSH port on the host machine which is forwarded
-  to the SSH port on the guest machine. Because Packer often runs in parallel,
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
+  maximum port to use for the SSH port on the host machine which is forwarded to
+  the SSH port on the guest machine. Because Packer often runs in parallel,
   Packer will choose a randomly available port in this range to use as the
   host port.

-* `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does
-  not setup forwarded port mapping for SSH requests and uses `ssh_port` on the
-  host to communicate to the virtual machine
+- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer
+  does not set up forwarded port mapping for SSH requests and uses `ssh_port` on
+  the host to communicate with the virtual machine. A usage sketch follows
+  this list.

-* `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
-  execute in order to further customize the virtual machine being created.
-  The value of this is an array of commands to execute. The commands are executed
-  in the order defined in the template. For each command, the command is
-  defined itself as an array of strings, where each string represents a single
-  argument on the command-line to `VBoxManage` (but excluding `VBoxManage`
-  itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html),
-  where the `Name` variable is replaced with the VM name. More details on how
-  to use `VBoxManage` are below.
+- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
+  execute in order to further customize the virtual machine being created. The
+  value of this is an array of commands to execute. The commands are executed in
+  the order defined in the template. For each command, the command is defined
+  itself as an array of strings, where each string represents a single argument
+  on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each
+  arg is treated as a [configuration
+  template](/docs/templates/configuration-templates.html), where the `Name`
+  variable is replaced with the VM name. More details on how to use `VBoxManage`
+  are below.

-* `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
+- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
   except that it is run after the virtual machine is shutdown, and before the
   virtual machine is exported.

-* `virtualbox_version_file` (string) - The path within the virtual machine
-  to upload a file that contains the VirtualBox version that was used to
-  create the machine. This information can be useful for provisioning.
-  By default this is ".vbox_version", which will generally be upload it into
-  the home directory.
+- `virtualbox_version_file` (string) - The path within the virtual machine to
+  upload a file that contains the VirtualBox version that was used to create
+  the machine. This information can be useful for provisioning. By default this
+  is ".vbox\_version", which will generally be uploaded into the
+  home directory.

-* `vm_name` (string) - This is the name of the OVF file for the new virtual
+- `vm_name` (string) - This is the name of the OVF file for the new virtual
   machine, without the file extension. By default this is "packer-BUILDNAME",
   where "BUILDNAME" is the name of the build.
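To make `ssh_skip_nat_mapping` concrete, here is a minimal sketch; the values
are placeholders, not defaults. It assumes the guest's SSH port is directly
reachable from the host (for example, over a bridged or host-only network), in
which case Packer connects to `ssh_port` as-is rather than creating a forwarded
port mapping:

``` {.javascript}
{
  "type": "virtualbox-iso",
  "ssh_username": "packer",
  "ssh_port": 22,
  "ssh_skip_nat_mapping": true
}
```

When this option is left at its default of false, Packer instead picks a free
host port between `ssh_host_port_min` and `ssh_host_port_max` and forwards it
to the guest's SSH port.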
 ## Boot Command

-The `boot_command` configuration is very important: it specifies the keys
-to type when the virtual machine is first booted in order to start the
-OS installer. This command is typed after `boot_wait`, which gives the
-virtual machine some time to actually load the ISO.
+The `boot_command` configuration is very important: it specifies the keys to
+type when the virtual machine is first booted in order to start the OS
+installer. This command is typed after `boot_wait`, which gives the virtual
+machine some time to actually load the ISO.

-As documented above, the `boot_command` is an array of strings. The
-strings are all typed in sequence. It is an array only to improve readability
-within the template.
+As documented above, the `boot_command` is an array of strings. The strings are
+all typed in sequence. It is an array only to improve readability within the
+template.

-The boot command is "typed" character for character over a VNC connection
-to the machine, simulating a human actually typing the keyboard. There are
-a set of special keys available. If these are in your boot command, they
-will be replaced by the proper key:
+The boot command is "typed" character for character over a VNC connection to the
+machine, simulating a human actually typing the keyboard. There is a set of
+special keys available. If these are in your boot command, they will be replaced
+by the proper key:

-* `<bs>` - Backspace
+- `<bs>` - Backspace

-* `<del>` - Delete
+- `<del>` - Delete

-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-* `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
+  any additional keys. This is useful if you generally have to wait for the UI
+  to update before typing more.

 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:

-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-  that is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will
-  be blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
+  that is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!

-Example boot command.
This is actually a working boot command used to start -an Ubuntu 12.04 installer: +Example boot command. This is actually a working boot command used to start an +Ubuntu 12.04 installer: -```text +``` {.text} [ "", "/install/vmlinuz noapic ", @@ -296,31 +296,32 @@ an Ubuntu 12.04 installer: ## Guest Additions -Packer will automatically download the proper guest additions for the -version of VirtualBox that is running and upload those guest additions into -the virtual machine so that provisioners can easily install them. +Packer will automatically download the proper guest additions for the version of +VirtualBox that is running and upload those guest additions into the virtual +machine so that provisioners can easily install them. -Packer downloads the guest additions from the official VirtualBox website, -and verifies the file with the official checksums released by VirtualBox. +Packer downloads the guest additions from the official VirtualBox website, and +verifies the file with the official checksums released by VirtualBox. -After the virtual machine is up and the operating system is installed, -Packer uploads the guest additions into the virtual machine. The path where -they are uploaded is controllable by `guest_additions_path`, and defaults -to "VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the -home directory of the SSH user. +After the virtual machine is up and the operating system is installed, Packer +uploads the guest additions into the virtual machine. The path where they are +uploaded is controllable by `guest_additions_path`, and defaults to +"VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home +directory of the SSH user. ## VBoxManage Commands -In order to perform extra customization of the virtual machine, a template -can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) -is the command-line interface to VirtualBox where you can completely control -VirtualBox. It can be used to do things such as set RAM, CPUs, etc. +In order to perform extra customization of the virtual machine, a template can +define extra calls to `VBoxManage` to perform. +[VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line +interface to VirtualBox where you can completely control VirtualBox. It can be +used to do things such as set RAM, CPUs, etc. -Extra VBoxManage commands are defined in the template in the `vboxmanage` section. -An example is shown below that sets the memory and number of CPUs within the -virtual machine: +Extra VBoxManage commands are defined in the template in the `vboxmanage` +section. An example is shown below that sets the memory and number of CPUs +within the virtual machine: -```javascript +``` {.javascript} { "vboxmanage": [ ["modifyvm", "{{.Name}}", "--memory", "1024"], @@ -329,12 +330,12 @@ virtual machine: } ``` -The value of `vboxmanage` is an array of commands to execute. These commands -are executed in the order defined. So in the above example, the memory will be -set followed by the CPUs. +The value of `vboxmanage` is an array of commands to execute. These commands are +executed in the order defined. So in the above example, the memory will be set +followed by the CPUs. -Each command itself is an array of strings, where each string is an argument -to `VBoxManage`. Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). 
-The only available variable is `Name` which is replaced with the unique -name of the VM, which is required for many VBoxManage calls. +Each command itself is an array of strings, where each string is an argument to +`VBoxManage`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many VBoxManage calls. diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index 0a4516d02..dcf5dbd5c 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -1,39 +1,41 @@ --- -layout: "docs" -page_title: "VirtualBox Builder (from an OVF/OVA)" -description: |- - This VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVF format, starting from an existing OVF/OVA (exported virtual machine image). ---- +description: | + This VirtualBox Packer builder is able to create VirtualBox virtual machines and + export them in the OVF format, starting from an existing OVF/OVA (exported + virtual machine image). +layout: docs +page_title: 'VirtualBox Builder (from an OVF/OVA)' +... # VirtualBox Builder (from an OVF/OVA) Type: `virtualbox-ovf` -This VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) -virtual machines and export them in the OVF format, starting from an -existing OVF/OVA (exported virtual machine image). +This VirtualBox Packer builder is able to create +[VirtualBox](https://www.virtualbox.org/) virtual machines and export them in +the OVF format, starting from an existing OVF/OVA (exported virtual machine +image). -When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this: +When exporting from VirtualBox make sure to choose OVF Version 2, since Version +1 is not compatible and will generate errors like this: -``` -==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR -==> virtualbox-ovf: VBoxManage: error: Appliance read failed -==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 -==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance -==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp -``` + ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR + ==> virtualbox-ovf: VBoxManage: error: Appliance read failed + ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 + ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance + ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp -The builder builds a virtual machine by importing an existing OVF or OVA -file. It then boots this image, runs provisioners on this new VM, and -exports that VM to create the image. The imported machine is deleted prior -to finishing the build. +The builder builds a virtual machine by importing an existing OVF or OVA file. +It then boots this image, runs provisioners on this new VM, and exports that VM +to create the image. 
The imported machine is deleted prior to finishing the +build. ## Basic Example Here is a basic example. This example is functional if you have an OVF matching the settings here. -```javascript +``` {.javascript} { "type": "virtualbox-ovf", "source_path": "source.ovf", @@ -43,193 +45,194 @@ the settings here. } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a +It is important to add a `shutdown_command`. By default Packer halts the virtual +machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the VirtualBox builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VirtualBox builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `source_path` (string) - The path to an OVF or OVA file that acts as - the source of this build. +- `source_path` (string) - The path to an OVF or OVA file that acts as the + source of this build. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `export_opts` (array of strings) - Additional options to pass to the `VBoxManage export`. - This can be useful for passing product information to include in the resulting - appliance file. +- `export_opts` (array of strings) - Additional options to pass to the + `VBoxManage export`. This can be useful for passing product information to + include in the resulting appliance file. 
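As an illustration of the `export_opts` entry just above, the following sketch
passes product metadata through to the exported appliance. The flags shown are
ordinary `VBoxManage export` options, and the values are placeholders:

``` {.javascript}
{
  "export_opts": [
    "--manifest",
    "--vsys", "0",
    "--description", "Built with Packer"
  ]
}
```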
-* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `format` (string) - Either "ovf" or "ova", this specifies the output - format of the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format of + the exported virtual machine. This defaults to "ovf". -* `guest_additions_mode` (string) - The method by which guest additions - are made available to the guest for installation. Valid options are - "upload", "attach", or "disable". If the mode is "attach" the guest - additions ISO will be attached as a CD device to the virtual machine. - If the mode is "upload" the guest additions ISO will be uploaded to - the path specified by `guest_additions_path`. The default value is - "upload". If "disable" is used, guest additions won't be downloaded, - either. +- `guest_additions_mode` (string) - The method by which guest additions are made + available to the guest for installation. Valid options are "upload", "attach", + or "disable". If the mode is "attach" the guest additions ISO will be attached + as a CD device to the virtual machine. If the mode is "upload" the guest + additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -* `guest_additions_path` (string) - The path on the guest virtual machine - where the VirtualBox guest additions ISO will be uploaded. By default this - is "VBoxGuestAdditions.iso" which should upload into the login directory - of the user. This is a [configuration template](/docs/templates/configuration-templates.html) - where the `Version` variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine where + the VirtualBox guest additions ISO will be uploaded. By default this is + "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. -* `guest_additions_sha256` (string) - The SHA256 checksum of the guest - additions ISO that will be uploaded to the guest VM. 
By default the - checksums will be downloaded from the VirtualBox website, so this only - needs to be set if you want to be explicit about the checksum. +- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions + ISO that will be uploaded to the guest VM. By default the checksums will be + downloaded from the VirtualBox website, so this only needs to be set if you + want to be explicit about the checksum. -* `guest_additions_url` (string) - The URL to the guest additions ISO - to upload. This can also be a file URL if the ISO is at a local path. - By default the VirtualBox builder will go and download the proper - guest additions ISO from the internet. +- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. + This can also be a file URL if the ISO is at a local path. By default the + VirtualBox builder will go and download the proper guest additions ISO from + the internet. -* `headless` (boolean) - Packer defaults to building VirtualBox - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. +- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `import_flags` (array of strings) - Additional flags to pass to - `VBoxManage import`. This can be used to add additional command-line flags - such as `--eula-accept` to accept a EULA in the OVF. +- `import_flags` (array of strings) - Additional flags to pass to + `VBoxManage import`. This can be used to add additional command-line flags + such as `--eula-accept` to accept a EULA in the OVF. -* `import_opts` (string) - Additional options to pass to the `VBoxManage import`. - This can be useful for passing "keepallmacs" or "keepnatmacs" options for existing - ovf images. 
+- `import_opts` (string) - Additional options to pass to the
+  `VBoxManage import`. This can be useful for passing "keepallmacs" or
+  "keepnatmacs" options for existing ovf images.

-* `output_directory` (string) - This is the path to the directory where the
+- `output_directory` (string) - This is the path to the directory where the
   resulting virtual machine will be created. This may be relative or absolute.
   If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running the builder.
-  By default this is "output-BUILDNAME" where "BUILDNAME" is the name
-  of the build.
+  is executed. This directory must not exist or be empty prior to running
+  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+  name of the build.

-* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all
-  the provisioning is done. By default this is an empty string, which tells Packer to just
-  forcefully shut down the machine unless a shutdown command takes place inside script so this may
-  safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank
-  since reboots may fail and specify the final shutdown command in your last script.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+  machine once all the provisioning is done. By default this is an empty string,
+  which tells Packer to just forcefully shut down the machine unless a shutdown
+  command takes place inside a script, in which case this may safely be omitted.
+  If one or more scripts require a reboot, it is suggested to leave this blank
+  (since reboots may fail) and specify the final shutdown command in your
+  last script.

-* `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down.
-  If it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+  `shutdown_command` for the virtual machine to actually shut down. If it
+  doesn't shut down in this time, it is an error. By default, the timeout is
+  "5m", or five minutes.

-* `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
-  maximum port to use for the SSH port on the host machine which is forwarded
-  to the SSH port on the guest machine. Because Packer often runs in parallel,
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
+  maximum port to use for the SSH port on the host machine which is forwarded to
+  the SSH port on the guest machine. Because Packer often runs in parallel,
   Packer will choose a randomly available port in this range to use as the
   host port.

-* `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does
-  not setup forwarded port mapping for SSH requests and uses `ssh_port` on the
-  host to communicate to the virtual machine
+- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer
+  does not set up forwarded port mapping for SSH requests and uses `ssh_port`
+  on the host to communicate with the virtual machine.

-* `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
-  execute in order to further customize the virtual machine being created.
-  The value of this is an array of commands to execute. The commands are executed
-  in the order defined in the template. 
For each command, the command is
-  defined itself as an array of strings, where each string represents a single
-  argument on the command-line to `VBoxManage` (but excluding `VBoxManage`
-  itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html),
-  where the `Name` variable is replaced with the VM name. More details on how
-  to use `VBoxManage` are below.
+- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
+  execute in order to further customize the virtual machine being created. The
+  value of this is an array of commands to execute. The commands are executed in
+  the order defined in the template. Each command is itself defined as an array
+  of strings, where each string represents a single argument on the command line
+  to `VBoxManage` (but excluding `VBoxManage` itself). Each argument is treated
+  as a [configuration
+  template](/docs/templates/configuration-templates.html), where the `Name`
+  variable is replaced with the VM name. More details on how to use `VBoxManage`
+  are below.

-* `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
+- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
   except that it is run after the virtual machine is shutdown, and before the
   virtual machine is exported.

-* `virtualbox_version_file` (string) - The path within the virtual machine
-  to upload a file that contains the VirtualBox version that was used to
-  create the machine. This information can be useful for provisioning.
-  By default this is ".vbox_version", which will generally be upload it into
-  the home directory.
+- `virtualbox_version_file` (string) - The path within the virtual machine to
+  upload a file that contains the VirtualBox version that was used to create
+  the machine. This information can be useful for provisioning. By default this
+  is ".vbox\_version", which will generally be uploaded into the
+  home directory.

-* `vm_name` (string) - This is the name of the virtual machine when it is
-  imported as well as the name of the OVF file when the virtual machine is
-  exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is
-  the name of the build.
+- `vm_name` (string) - This is the name of the virtual machine when it is
+  imported as well as the name of the OVF file when the virtual machine
+  is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
+  name of the build.

 ## Guest Additions

-Packer will automatically download the proper guest additions for the
-version of VirtualBox that is running and upload those guest additions into
-the virtual machine so that provisioners can easily install them.
+Packer will automatically download the proper guest additions for the version of
+VirtualBox that is running and upload those guest additions into the virtual
+machine so that provisioners can easily install them.

-Packer downloads the guest additions from the official VirtualBox website,
-and verifies the file with the official checksums released by VirtualBox.
+Packer downloads the guest additions from the official VirtualBox website, and
+verifies the file with the official checksums released by VirtualBox.

-After the virtual machine is up and the operating system is installed,
-Packer uploads the guest additions into the virtual machine. The path where
-they are uploaded is controllable by `guest_additions_path`, and defaults
-to "VBoxGuestAdditions.iso". 
Without an absolute path, it is uploaded to the -home directory of the SSH user. +After the virtual machine is up and the operating system is installed, Packer +uploads the guest additions into the virtual machine. The path where they are +uploaded is controllable by `guest_additions_path`, and defaults to +"VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home +directory of the SSH user. ## VBoxManage Commands -In order to perform extra customization of the virtual machine, a template -can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) -is the command-line interface to VirtualBox where you can completely control -VirtualBox. It can be used to do things such as set RAM, CPUs, etc. +In order to perform extra customization of the virtual machine, a template can +define extra calls to `VBoxManage` to perform. +[VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line +interface to VirtualBox where you can completely control VirtualBox. It can be +used to do things such as set RAM, CPUs, etc. -Extra VBoxManage commands are defined in the template in the `vboxmanage` section. -An example is shown below that sets the memory and number of CPUs within the -virtual machine: +Extra VBoxManage commands are defined in the template in the `vboxmanage` +section. An example is shown below that sets the memory and number of CPUs +within the virtual machine: -```javascript +``` {.javascript} { "vboxmanage": [ ["modifyvm", "{{.Name}}", "--memory", "1024"], @@ -238,12 +241,12 @@ virtual machine: } ``` -The value of `vboxmanage` is an array of commands to execute. These commands -are executed in the order defined. So in the above example, the memory will be -set followed by the CPUs. +The value of `vboxmanage` is an array of commands to execute. These commands are +executed in the order defined. So in the above example, the memory will be set +followed by the CPUs. -Each command itself is an array of strings, where each string is an argument -to `VBoxManage`. Each argument is treated as a -[configuration template](/docs/templates/configuration-templates.html). -The only available variable is `Name` which is replaced with the unique -name of the VM, which is required for many VBoxManage calls. +Each command itself is an array of strings, where each string is an argument to +`VBoxManage`. Each argument is treated as a [configuration +template](/docs/templates/configuration-templates.html). The only available +variable is `Name` which is replaced with the unique name of the VM, which is +required for many VBoxManage calls. diff --git a/website/source/docs/builders/virtualbox.html.markdown b/website/source/docs/builders/virtualbox.html.markdown index 26e94b5b8..f96d37515 100644 --- a/website/source/docs/builders/virtualbox.html.markdown +++ b/website/source/docs/builders/virtualbox.html.markdown @@ -1,27 +1,28 @@ --- -layout: "docs" -page_title: "VirtualBox Builder" -description: |- - The VirtualBox Packer builder is able to create VirtualBox virtual machines and export them in the OVA or OVF format. ---- +description: | + The VirtualBox Packer builder is able to create VirtualBox virtual machines and + export them in the OVA or OVF format. +layout: docs +page_title: VirtualBox Builder +... # VirtualBox Builder -The VirtualBox Packer builder is able to create [VirtualBox](http://www.virtualbox.org) -virtual machines and export them in the OVA or OVF format. 
+The VirtualBox Packer builder is able to create +[VirtualBox](http://www.virtualbox.org) virtual machines and export them in the +OVA or OVF format. -Packer actually comes with multiple builders able to create VirtualBox -machines, depending on the strategy you want to use to build the image. -Packer supports the following VirtualBox builders: +Packer actually comes with multiple builders able to create VirtualBox machines, +depending on the strategy you want to use to build the image. Packer supports +the following VirtualBox builders: -* [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from - an ISO file, creates a brand new VirtualBox VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. +- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO + file, creates a brand new VirtualBox VM, installs an OS, provisions software + within the OS, then exports that machine to create an image. This is best for + people who want to start from scratch. -* [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder - imports an existing OVF/OVA file, runs provisioners on top of that VM, - and exports that machine to create an image. This is best if you have - an existing VirtualBox VM export you want to use as the source. As an - additional benefit, you can feed the artifact of this builder back into - itself to iterate on a machine. +- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports an + existing OVF/OVA file, runs provisioners on top of that VM, and exports that + machine to create an image. This is best if you have an existing VirtualBox VM + export you want to use as the source. As an additional benefit, you can feed + the artifact of this builder back into itself to iterate on a machine. diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 8ac3a9fd3..ad2ac5c33 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -1,37 +1,40 @@ --- -layout: "docs" -page_title: "VMware Builder from ISO" -description: |- - This VMware Packer builder is able to create VMware virtual machines from an ISO file as a source. It currently supports building virtual machines on hosts running VMware Fusion for OS X, VMware Workstation for Linux and Windows, and VMware Player on Linux. It can also build machines directly on VMware vSphere Hypervisor using SSH as opposed to the vSphere API. ---- +description: | + This VMware Packer builder is able to create VMware virtual machines from an ISO + file as a source. It currently supports building virtual machines on hosts + running VMware Fusion for OS X, VMware Workstation for Linux and Windows, and + VMware Player on Linux. It can also build machines directly on VMware vSphere + Hypervisor using SSH as opposed to the vSphere API. +layout: docs +page_title: VMware Builder from ISO +... # VMware Builder (from ISO) Type: `vmware-iso` -This VMware Packer builder is able to create VMware virtual machines from an -ISO file as a source. It currently -supports building virtual machines on hosts running -[VMware Fusion](http://www.vmware.com/products/fusion/overview.html) for OS X, -[VMware Workstation](http://www.vmware.com/products/workstation/overview.html) -for Linux and Windows, and -[VMware Player](http://www.vmware.com/products/player/) on Linux. 
It can -also build machines directly on -[VMware vSphere Hypervisor](http://www.vmware.com/products/vsphere-hypervisor/) -using SSH as opposed to the vSphere API. +This VMware Packer builder is able to create VMware virtual machines from an ISO +file as a source. It currently supports building virtual machines on hosts +running [VMware Fusion](http://www.vmware.com/products/fusion/overview.html) for +OS X, [VMware +Workstation](http://www.vmware.com/products/workstation/overview.html) for Linux +and Windows, and [VMware Player](http://www.vmware.com/products/player/) on +Linux. It can also build machines directly on [VMware vSphere +Hypervisor](http://www.vmware.com/products/vsphere-hypervisor/) using SSH as +opposed to the vSphere API. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. The result of the VMware builder is a directory -containing all the files necessary to run the virtual machine. +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, then +shutting it down. The result of the VMware builder is a directory containing all +the files necessary to run the virtual machine. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu to +self-install. Still, the example serves to show the basic configuration: -```javascript +``` {.javascript} { "type": "vmware-iso", "iso_url": "http://old-releases.ubuntu.com/releases/precise/ubuntu-12.04.2-server-amd64.iso", @@ -44,261 +47,261 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio ## Configuration Reference -There are many configuration options available for the VMware builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VMware builder. They are +organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior - to booting a virtual machine with the ISO attached. The type of the - checksum is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files + are so large, this is required and Packer will verify it prior to booting a + virtual machine with the ISO attached. The type of the checksum is specified + with `iso_checksum_type`, documented below. -* `iso_checksum_type` (string) - The type of the checksum specified in +- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. 
Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. -* `iso_url` (string) - A URL to the ISO containing the installation image. - This URL can be either an HTTP URL or a file URL (or path to a file). - If this is an HTTP URL, Packer will download it and cache it between - runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. This + URL can be either an HTTP URL or a file URL (or path to a file). If this is an + HTTP URL, Packer will download it and cache it between runs. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `disk_additional_size` (array of integers) - The size(s) of any additional +- `disk_additional_size` (array of integers) - The size(s) of any additional hard disks for the VM in megabytes. If this is not specified then the VM will only contain a primary hard disk. The builder uses expandable, not fixed-size virtual hard disks, so the actual file representing the disk will not use the full size unless it is full. -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `disk_size` (integer) - The size of the hard disk for the VM in megabytes. - The builder uses expandable, not fixed-size virtual hard disks, so the - actual file representing the disk will not use the full size unless it is full. - By default this is set to 40,000 (about 40 GB). +- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. The + builder uses expandable, not fixed-size virtual hard disks, so the actual file + representing the disk will not use the full size unless it is full. By default + this is set to 40,000 (about 40 GB). -* `disk_type_id` (string) - The type of VMware virtual disk to create. - The default is "1", which corresponds to a growable virtual disk split in - 2GB files. This option is for advanced usage, modify only if you - know what you're doing. 
For more information, please consult the - [Virtual Disk Manager User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) - for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. +- `disk_type_id` (string) - The type of VMware virtual disk to create. The + default is "1", which corresponds to a growable virtual disk split in + 2GB files. This option is for advanced usage, modify only if you know what + you're doing. For more information, please consult the [Virtual Disk Manager + User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop + VMware clients. For ESXi, refer to the proper ESXi documentation. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this - is "/Applications/VMware Fusion.app" but this setting allows you to +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to customize this. -* `guest_os_type` (string) - The guest OS type being installed. This will be - set in the VMware VMX. By default this is "other". By specifying a more specific - OS type, VMware may perform some optimizations or virtual hardware changes - to better support the operating system running in the virtual machine. +- `guest_os_type` (string) - The guest OS type being installed. This will be set + in the VMware VMX. By default this is "other". By specifying a more specific + OS type, VMware may perform some optimizations or virtual hardware changes to + better support the operating system running in the virtual machine. -* `headless` (boolean) - Packer defaults to building VMware - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. For VMware machines, Packer will output VNC - connection information in case you need to connect to the console to - debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines by + launching a GUI that shows the console of the machine being built. When this + value is set to true, the machine will start without a console. 
For VMware + machines, Packer will output VNC connection information in case you need to + connect to the console to debug the build process. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. +- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer + will try these in order. If anything goes wrong attempting to download or + while downloading a single URL, it will move on to the next. All URLs must + point to the same file (same checksum). By default this is empty and `iso_url` + is used. Only one of `iso_url` or `iso_urls` can be specified. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + is executed. This directory must not exist or be empty prior to running + the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the + name of the build. -* `remote_cache_datastore` (string) - The path to the datastore where - supporting files will be stored during the build on the remote machine. - By default this is the same as the `remote_datastore` option. This only - has an effect if `remote_type` is enabled. +- `remote_cache_datastore` (string) - The path to the datastore where supporting + files will be stored during the build on the remote machine. 
By default this + is the same as the `remote_datastore` option. This only has an effect if + `remote_type` is enabled. -* `remote_cache_directory` (string) - The path where the ISO and/or floppy - files will be stored during the build on the remote machine. The path is - relative to the `remote_cache_datastore` on the remote machine. By default - this is "packer_cache". This only has an effect if `remote_type` is enabled. +- `remote_cache_directory` (string) - The path where the ISO and/or floppy files + will be stored during the build on the remote machine. The path is relative to + the `remote_cache_datastore` on the remote machine. By default this + is "packer\_cache". This only has an effect if `remote_type` is enabled. -* `remote_datastore` (string) - The path to the datastore where the resulting - VM will be stored when it is built on the remote machine. By default this +- `remote_datastore` (string) - The path to the datastore where the resulting VM + will be stored when it is built on the remote machine. By default this is "datastore1". This only has an effect if `remote_type` is enabled. -* `remote_host` (string) - The host of the remote machine used for access. - This is only required if `remote_type` is enabled. +- `remote_host` (string) - The host of the remote machine used for access. This + is only required if `remote_type` is enabled. -* `remote_password` (string) - The SSH password for the user used to - access the remote machine. By default this is empty. This only has an - effect if `remote_type` is enabled. +- `remote_password` (string) - The SSH password for the user used to access the + remote machine. By default this is empty. This only has an effect if + `remote_type` is enabled. -* `remote_type` (string) - The type of remote machine that will be used to - build this VM rather than a local desktop product. The only value accepted - for this currently is "esx5". If this is not set, a desktop product will be - used. By default, this is not set. +- `remote_type` (string) - The type of remote machine that will be used to build + this VM rather than a local desktop product. The only value accepted for this + currently is "esx5". If this is not set, a desktop product will be used. By + default, this is not set. -* `remote_username` (string) - The username for the SSH user that will access +- `remote_username` (string) - The username for the SSH user that will access the remote machine. This is required if `remote_type` is enabled. -* `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty string, + which tells Packer to just forcefully shut down the machine. -* `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout - is "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. 
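+
+  As a rough sketch of how these two options work together (assuming a Linux
+  guest whose SSH user has passwordless sudo; adapt the command to your guest
+  OS), a graceful shutdown might look like:
+
+  ``` {.javascript}
+  {
+    "shutdown_command": "sudo shutdown -P now",
+    "shutdown_timeout": "10m"
+  }
+  ```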
-* `skip_compaction` (boolean) - VMware-created disks are defragmented - and compacted at the end of the build process using `vmware-vdiskmanager`. - In certain rare cases, this might actually end up making the resulting disks +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks slightly larger. If you find this to be the case, you can disable compaction using this configuration value. -* `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to - upload into the VM. Valid values are "darwin", "linux", and "windows". - By default, this is empty, which means VMware tools won't be uploaded. +- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload + into the VM. Valid values are "darwin", "linux", and "windows". By default, + this is empty, which means VMware tools won't be uploaded. -* `tools_upload_path` (string) - The path in the VM to upload the VMware - tools. This only takes effect if `tools_upload_flavor` is non-empty. - This is a [configuration template](/docs/templates/configuration-templates.html) - that has a single valid variable: `Flavor`, which will be the value of - `tools_upload_flavor`. By default the upload path is set to - `{{.Flavor}}.iso`. This setting is not used when `remote_type` is "esx5". +- `tools_upload_path` (string) - The path in the VM to upload the VMware tools. + This only takes effect if `tools_upload_flavor` is non-empty. This is a + [configuration template](/docs/templates/configuration-templates.html) that + has a single valid variable: `Flavor`, which will be the value of + `tools_upload_flavor`. By default the upload path is set to `{{.Flavor}}.iso`. + This setting is not used when `remote_type` is "esx5". -* `version` (string) - The [vmx hardware version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) for the new virtual machine. Only the default value has been tested, any other value is experimental. Default value is '9'. +- `version` (string) - The [vmx hardware + version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) + for the new virtual machine. Only the default value has been tested, any other + value is experimental. Default value is '9'. -* `vm_name` (string) - This is the name of the VMX file for the new virtual +- `vm_name` (string) - This is the name of the VMX file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. -* `vmdk_name` (string) - The filename of the virtual disk that'll be created, +- `vmdk_name` (string) - The filename of the virtual disk that'll be created, without the extension. This defaults to "packer". -* `vmx_data` (object of key/value strings) - Arbitrary key/values - to enter into the virtual machine VMX file. This is for advanced users - who want to set properties such as memory, CPU, etc. +- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into + the virtual machine VMX file. This is for advanced users who want to set + properties such as memory, CPU, etc. 
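+
+  For example (illustrative values only; `memsize` and `numvcpus` are standard
+  VMX keys, not Packer defaults), memory and CPU count can be set like so:
+
+  ``` {.javascript}
+  {
+    "vmx_data": {
+      "memsize": "1024",
+      "numvcpus": "2"
+    }
+  }
+  ```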
-* `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
+- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
   except that it is run after the virtual machine is shutdown, and before the
   virtual machine is exported.

-* `vmx_template_path` (string) - Path to a
-  [configuration template](/docs/templates/configuration-templates.html) that
-  defines the contents of the virtual machine VMX file for VMware. This is
-  for **advanced users only** as this can render the virtual machine
-  non-functional. See below for more information. For basic VMX modifications,
-  try `vmx_data` first.
+- `vmx_template_path` (string) - Path to a [configuration
+  template](/docs/templates/configuration-templates.html) that defines the
+  contents of the virtual machine VMX file for VMware. This is for **advanced
+  users only** as this can render the virtual machine non-functional. See below
+  for more information. For basic VMX modifications, try `vmx_data` first.

-* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
-  use for VNC access to the virtual machine. The builder uses VNC to type
-  the initial `boot_command`. Because Packer generally runs in parallel, Packer
-  uses a randomly chosen port in this range that appears available. By default
-  this is 5900 to 6000. The minimum and maximum ports are inclusive.
+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
+  use for VNC access to the virtual machine. The builder uses VNC to type the
+  initial `boot_command`. Because Packer generally runs in parallel, Packer uses
+  a randomly chosen port in this range that appears available. By default this
+  is 5900 to 6000. The minimum and maximum ports are inclusive.

 ## Boot Command

-The `boot_command` configuration is very important: it specifies the keys
-to type when the virtual machine is first booted in order to start the
-OS installer. This command is typed after `boot_wait`, which gives the
-virtual machine some time to actually load the ISO.
+The `boot_command` configuration is very important: it specifies the keys to
+type when the virtual machine is first booted in order to start the OS
+installer. This command is typed after `boot_wait`, which gives the virtual
+machine some time to actually load the ISO.

-As documented above, the `boot_command` is an array of strings. The
-strings are all typed in sequence. It is an array only to improve readability
-within the template.
+As documented above, the `boot_command` is an array of strings. The strings are
+all typed in sequence. It is an array only to improve readability within the
+template.

-The boot command is "typed" character for character over a VNC connection
-to the machine, simulating a human actually typing the keyboard. There are
-a set of special keys available. If these are in your boot command, they
-will be replaced by the proper key:
+The boot command is "typed" character for character over a VNC connection to the
+machine, simulating a human actually typing at the keyboard. There is a set of
+special keys available. If these are in your boot command, they will be replaced
+by the proper key:

-* `<bs>` - Backspace
+- `<bs>` - Backspace

-* `<del>` - Delete
+- `<del>` - Delete

-* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-* `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-* `<tab>` - Simulates pressing the tab key. 
-* `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-* `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-* `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-* `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This
-  is useful if you have to generally wait for the UI to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5, or 10 second pause before sending
+  any additional keys. This is useful if you generally have to wait for the UI
+  to update before typing more.

 In addition to the special keys, each command to type is treated as a
-[configuration template](/docs/templates/configuration-templates.html).
-The available variables are:
+[configuration template](/docs/templates/configuration-templates.html). The
+available variables are:

-* `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
-  that is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will
-  be blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server that
+  is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!

-Example boot command. This is actually a working boot command used to start
-an Ubuntu 12.04 installer:
+Example boot command. This is actually a working boot command used to start an
+Ubuntu 12.04 installer:

-```text
+``` {.text}
 [
   "<esc><wait>",
   "/install/vmlinuz noapic ",
@@ -314,71 +317,73 @@ an Ubuntu 12.04 installer:

 ## VMX Template

-The heart of a VMware machine is the "vmx" file. This contains all the
-virtual hardware metadata necessary for the VM to function. Packer by default
-uses a [safe, flexible VMX file](https://github.com/mitchellh/packer/blob/20541a7eda085aa5cf35bfed5069592ca49d106e/builder/vmware/step_create_vmx.go#L84).
-But for advanced users, this template can be customized. This allows
-Packer to build virtual machines of effectively any guest operating system
-type.
+The heart of a VMware machine is the "vmx" file. This contains all the virtual
+hardware metadata necessary for the VM to function. Packer by default uses a
+[safe, flexible VMX
+file](https://github.com/mitchellh/packer/blob/20541a7eda085aa5cf35bfed5069592ca49d106e/builder/vmware/step_create_vmx.go#L84).
+But for advanced users, this template can be customized. This allows Packer to
+build virtual machines of effectively any guest operating system type.

-~> **This is an advanced feature.** Modifying the VMX template
-can easily cause your virtual machine to not boot properly. Please only
-modify the template if you know what you're doing.
+~> **This is an advanced feature.** Modifying the VMX template can easily
+cause your virtual machine to not boot properly. Please only modify the template
+if you know what you're doing.

-Within the template, a handful of variables are available so that your
-template can continue working with the rest of the Packer machinery. Using
-these variables isn't required, however. 
+Within the template, a handful of variables are available so that your template
+can continue working with the rest of the Packer machinery. Using these
+variables isn't required, however.

-* `Name` - The name of the virtual machine.
-* `GuestOS` - The VMware-valid guest OS type.
-* `DiskName` - The filename (without the suffix) of the main virtual disk.
-* `ISOPath` - The path to the ISO to use for the OS installation.
-* `Version` - The Hardware version VMWare will execute this vm under. Also known as the `virtualhw.version`.
+- `Name` - The name of the virtual machine.
+- `GuestOS` - The VMware-valid guest OS type.
+- `DiskName` - The filename (without the suffix) of the main virtual disk.
+- `ISOPath` - The path to the ISO to use for the OS installation.
+- `Version` - The hardware version VMware will execute this VM under. Also known
+  as the `virtualhw.version`.

 ## Building on a Remote vSphere Hypervisor

-In addition to using the desktop products of VMware locally to build
-virtual machines, Packer can use a remote VMware Hypervisor to build
-the virtual machine.
+In addition to using the desktop products of VMware locally to build virtual
+machines, Packer can use a remote VMware Hypervisor to build the virtual
+machine.

--> **Note:** Packer supports ESXi 5.1 and above.
+-> **Note:** Packer supports ESXi 5.1 and above.

-Before using a remote vSphere Hypervisor, you need to enable GuestIPHack by running the following command:
+Before using a remote vSphere Hypervisor, you need to enable GuestIPHack by
+running the following command:

-```text
+``` {.text}
 esxcli system settings advanced set -o /Net/GuestIPHack -i 1
 ```

-When using a remote VMware Hypervisor, the builder still downloads the
-ISO and various files locally, and uploads these to the remote machine.
-Packer currently uses SSH to communicate to the ESXi machine rather than
-the vSphere API. At some point, the vSphere API may be used.
+When using a remote VMware Hypervisor, the builder still downloads the ISO and
+various files locally, and uploads these to the remote machine. Packer currently
+uses SSH to communicate with the ESXi machine rather than the vSphere API. At
+some point, the vSphere API may be used.

-Packer also requires VNC to issue boot commands during a build,
-which may be disabled on some remote VMware Hypervisors. Please consult
-the appropriate documentation on how to update VMware Hypervisor's firewall
-to allow these connections.
+Packer also requires VNC to issue boot commands during a build, which may be
+disabled on some remote VMware Hypervisors. Please consult the appropriate
+documentation on how to update VMware Hypervisor's firewall to allow these
+connections.

-To use a remote VMware vSphere Hypervisor to build your virtual machine,
-fill in the required `remote_*` configurations:
+To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in
+the required `remote_*` configurations:

-* `remote_type` - This must be set to "esx5".
+- `remote_type` - This must be set to "esx5".

-* `remote_host` - The host of the remote machine.
+- `remote_host` - The host of the remote machine.

-Additionally, there are some optional configurations that you'll likely
-have to modify as well:
+Additionally, there are some optional configurations that you'll likely have to
+modify as well:

-* `remote_datastore` - The path to the datastore where the VM will be
-  stored on the ESXi machine.
+- `remote_datastore` - The path to the datastore where the VM will be stored on
+  the ESXi machine. 
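+
+  Tying the remote options together, a hedged sketch of an ESXi build
+  configuration (the hostname and credentials are placeholders, and the
+  `esxi_password` user variable is assumed to be defined elsewhere):
+
+  ``` {.javascript}
+  {
+    "remote_type": "esx5",
+    "remote_host": "esxi.example.com",
+    "remote_username": "root",
+    "remote_password": "{{user `esxi_password`}}",
+    "remote_datastore": "datastore1"
+  }
+  ```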
-* `remote_cache_datastore` - The path to the datastore where - supporting files will be stored during the build on the remote machine. +- `remote_cache_datastore` - The path to the datastore where supporting files + will be stored during the build on the remote machine. -* `remote_cache_directory` - The path where the ISO and/or floppy - files will be stored during the build on the remote machine. The path is - relative to the `remote_cache_datastore` on the remote machine. +- `remote_cache_directory` - The path where the ISO and/or floppy files will be + stored during the build on the remote machine. The path is relative to the + `remote_cache_datastore` on the remote machine. -* `remote_username` - The SSH username used to access the remote machine. +- `remote_username` - The SSH username used to access the remote machine. -* `remote_password` - The SSH password for access to the remote machine. +- `remote_password` - The SSH password for access to the remote machine. diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown index e28ea3f89..bd1afb83c 100644 --- a/website/source/docs/builders/vmware-vmx.html.markdown +++ b/website/source/docs/builders/vmware-vmx.html.markdown @@ -1,34 +1,37 @@ --- -layout: "docs" -page_title: "VMware Builder from VMX" -description: |- - This VMware Packer builder is able to create VMware virtual machines from an existing VMware virtual machine (a VMX file). It currently supports building virtual machines on hosts running VMware Fusion Professional for OS X, VMware Workstation for Linux and Windows, and VMware Player on Linux. ---- +description: | + This VMware Packer builder is able to create VMware virtual machines from an + existing VMware virtual machine (a VMX file). It currently supports building + virtual machines on hosts running VMware Fusion Professional for OS X, VMware + Workstation for Linux and Windows, and VMware Player on Linux. +layout: docs +page_title: VMware Builder from VMX +... # VMware Builder (from VMX) Type: `vmware-vmx` This VMware Packer builder is able to create VMware virtual machines from an -existing VMware virtual machine (a VMX file). It currently -supports building virtual machines on hosts running -[VMware Fusion Professional](http://www.vmware.com/products/fusion-professional/) for OS X, +existing VMware virtual machine (a VMX file). It currently supports building +virtual machines on hosts running [VMware Fusion +Professional](http://www.vmware.com/products/fusion-professional/) for OS X, [VMware Workstation](http://www.vmware.com/products/workstation/overview.html) -for Linux and Windows, and -[VMware Player](http://www.vmware.com/products/player/) on Linux. +for Linux and Windows, and [VMware +Player](http://www.vmware.com/products/player/) on Linux. -The builder builds a virtual machine by cloning the VMX file using -the clone capabilities introduced in VMware Fusion Professional 6, Workstation 10, -and Player 6. After cloning the VM, it provisions software within the -new machine, shuts it down, and compacts the disks. The resulting folder -contains a new VMware virtual machine. +The builder builds a virtual machine by cloning the VMX file using the clone +capabilities introduced in VMware Fusion Professional 6, Workstation 10, and +Player 6. After cloning the VM, it provisions software within the new machine, +shuts it down, and compacts the disks. The resulting folder contains a new +VMware virtual machine. ## Basic Example -Here is an example. 
This example is fully functional as long as the source -path points to a real VMX file with the proper settings: +Here is an example. This example is fully functional as long as the source path +points to a real VMX file with the proper settings: -```javascript +``` {.javascript} { "type": "vmware-vmx", "source_path": "/path/to/a/vm.vmx", @@ -40,110 +43,109 @@ path points to a real VMX file with the proper settings: ## Configuration Reference -There are many configuration options available for the VMware builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the VMware builder. They are +organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `source_path` (string) - Path to the source VMX file to clone. +- `source_path` (string) - Path to the source VMX file to clone. -* `ssh_username` (string) - The username to use to SSH into the machine - once the OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once the + OS is installed. ### Optional: -* `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. +- `boot_command` (array of strings) - This is an array of commands to type when + the virtual machine is first booted. The goal of these commands should be to + type just enough to initialize the operating system installer. Special keys + can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -* `boot_wait` (string) - The time to wait after booting the initial virtual +- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five + seconds and one minute 30 seconds, respectively. If this isn't specified, the + default is 10 seconds. -* `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (*, ?, and []) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. 
+- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) + are allowed. Directory names are also allowed, which will add all the files + found in the directory to the floppy. -* `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this - is "/Applications/VMware Fusion.app" but this setting allows you to +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to customize this. -* `headless` (boolean) - Packer defaults to building VMware - virtual machines by launching a GUI that shows the console of the - machine being built. When this value is set to true, the machine will - start without a console. For VMware machines, Packer will output VNC - connection information in case you need to connect to the console to - debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines by + launching a GUI that shows the console of the machine being built. When this + value is set to true, the machine will start without a console. For VMware + machines, Packer will output VNC connection information in case you need to + connect to the console to debug the build process. -* `http_directory` (string) - Path to a directory to serve using an HTTP - server. The files in this directory will be available over HTTP that will - be requestable from the virtual machine. This is useful for hosting - kickstart files and so on. By default this is "", which means no HTTP - server will be started. The address and port of the HTTP server will be - available as variables in `boot_command`. This is covered in more detail - below. +- `http_directory` (string) - Path to a directory to serve using an HTTP server. + The files in this directory will be available over HTTP that will be + requestable from the virtual machine. This is useful for hosting kickstart + files and so on. By default this is "", which means no HTTP server will + be started. The address and port of the HTTP server will be available as + variables in `boot_command`. This is covered in more detail below. -* `http_port_min` and `http_port_max` (integer) - These are the minimum and +- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + server to be on one port, make this minimum and maximum port the same. By + default the values are 8000 and 9000, respectively. -* `output_directory` (string) - This is the path to the directory where the +- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. 
If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running the builder.
-  By default this is "output-BUILDNAME" where "BUILDNAME" is the name
-  of the build.
+  is executed. This directory must not exist or be empty prior to running
+  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+  name of the build.

-* `shutdown_command` (string) - The command to use to gracefully shut down the machine once all
-  the provisioning is done. By default this is an empty string, which tells Packer to just
-  forcefully shut down the machine unless a shutdown command takes place inside script so this may
-  safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank
-  since reboots may fail and specify the final shutdown command in your last script.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+  machine once all the provisioning is done. By default this is an empty string,
+  which tells Packer to just forcefully shut down the machine; if a shutdown
+  command takes place inside a script, this may safely be omitted. If one or
+  more scripts require a reboot, it is suggested to leave this blank, since
+  reboots may fail, and to specify the final shutdown command in your
+  last script.

-* `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down.
-  If it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+  `shutdown_command` for the virtual machine to actually shut down. If it
+  doesn't shut down in this time, it is an error. By default, the timeout is
+  "5m", or five minutes.

-* `skip_compaction` (boolean) - VMware-created disks are defragmented
-  and compacted at the end of the build process using `vmware-vdiskmanager`.
-  In certain rare cases, this might actually end up making the resulting disks
+- `skip_compaction` (boolean) - VMware-created disks are defragmented and
+  compacted at the end of the build process using `vmware-vdiskmanager`. In
+  certain rare cases, this might actually end up making the resulting disks
   slightly larger. If you find this to be the case, you can disable compaction
   using this configuration value.

-* `vm_name` (string) - This is the name of the VMX file for the new virtual
+- `vm_name` (string) - This is the name of the VMX file for the new virtual
   machine, without the file extension. By default this is "packer-BUILDNAME",
   where "BUILDNAME" is the name of the build.

-* `vmx_data` (object of key/value strings) - Arbitrary key/values
-  to enter into the virtual machine VMX file. This is for advanced users
-  who want to set properties such as memory, CPU, etc.
+- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into
+  the virtual machine VMX file. This is for advanced users who want to set
+  properties such as memory, CPU, etc.

-* `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
+- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
   except that it is run after the virtual machine is shutdown, and before the
   virtual machine is exported.

-* `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
-  use for VNC access to the virtual machine. The builder uses VNC to type
-  the initial `boot_command`.
Because Packer generally runs in parallel, Packer - uses a randomly chosen port in this range that appears available. By default - this is 5900 to 6000. The minimum and maximum ports are inclusive. +- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to + use for VNC access to the virtual machine. The builder uses VNC to type the + initial `boot_command`. Because Packer generally runs in parallel, Packer uses + a randomly chosen port in this range that appears available. By default this + is 5900 to 6000. The minimum and maximum ports are inclusive. diff --git a/website/source/docs/builders/vmware.html.markdown b/website/source/docs/builders/vmware.html.markdown index 84d94a369..e77fe574a 100644 --- a/website/source/docs/builders/vmware.html.markdown +++ b/website/source/docs/builders/vmware.html.markdown @@ -1,27 +1,28 @@ --- -layout: "docs" -page_title: "VMware Builder" -description: |- - The VMware Packer builder is able to create VMware virtual machines for use with any VMware product. ---- +description: | + The VMware Packer builder is able to create VMware virtual machines for use with + any VMware product. +layout: docs +page_title: VMware Builder +... # VMware Builder -The VMware Packer builder is able to create VMware virtual machines for use -with any VMware product. +The VMware Packer builder is able to create VMware virtual machines for use with +any VMware product. -Packer actually comes with multiple builders able to create VMware -machines, depending on the strategy you want to use to build the image. -Packer supports the following VMware builders: +Packer actually comes with multiple builders able to create VMware machines, +depending on the strategy you want to use to build the image. Packer supports +the following VMware builders: -* [vmware-iso](/docs/builders/vmware-iso.html) - Starts from - an ISO file, creates a brand new VMware VM, installs an OS, - provisions software within the OS, then exports that machine to create - an image. This is best for people who want to start from scratch. +- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file, + creates a brand new VMware VM, installs an OS, provisions software within the + OS, then exports that machine to create an image. This is best for people who + want to start from scratch. -* [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder - imports an existing VMware machine (from a VMX file), runs provisioners - on top of that VM, and exports that machine to create an image. - This is best if you have an existing VMware VM you want to use as the - source. As an additional benefit, you can feed the artifact of this - builder back into Packer to iterate on a machine. +- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an + existing VMware machine (from a VMX file), runs provisioners on top of that + VM, and exports that machine to create an image. This is best if you have an + existing VMware VM you want to use as the source. As an additional benefit, + you can feed the artifact of this builder back into Packer to iterate on + a machine. 
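To make the iteration workflow described above concrete, the template below is a
minimal, hypothetical sketch of feeding the artifact of an earlier build into
the `vmware-vmx` builder. It assumes the earlier build was a `vmware-iso` build
that kept the default `output_directory` ("output-BUILDNAME") and `vm_name`
("packer-BUILDNAME") described in the configuration reference above; the
username and shutdown command are placeholders.

``` {.javascript}
{
  "type": "vmware-vmx",
  "source_path": "output-vmware-iso/packer-vmware-iso.vmx",
  "ssh_username": "root",
  "shutdown_command": "shutdown -h now"
}
```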
diff --git a/website/source/docs/command-line/build.html.markdown b/website/source/docs/command-line/build.html.markdown index bada564a1..92afda570 100644 --- a/website/source/docs/command-line/build.html.markdown +++ b/website/source/docs/command-line/build.html.markdown @@ -1,37 +1,40 @@ --- -layout: "docs" -page_title: "Build - Command-Line" -description: |- - The `packer build` Packer command takes a template and runs all the builds within it in order to generate a set of artifacts. The various builds specified within a template are executed in parallel, unless otherwise specified. And the artifacts that are created will be outputted at the end of the build. ---- +description: | + The `packer build` Packer command takes a template and runs all the builds + within it in order to generate a set of artifacts. The various builds specified + within a template are executed in parallel, unless otherwise specified. And the + artifacts that are created will be outputted at the end of the build. +layout: docs +page_title: 'Build - Command-Line' +... # Command-Line: Build -The `packer build` Packer command takes a template and runs all the builds within -it in order to generate a set of artifacts. The various builds specified within -a template are executed in parallel, unless otherwise specified. And the +The `packer build` Packer command takes a template and runs all the builds +within it in order to generate a set of artifacts. The various builds specified +within a template are executed in parallel, unless otherwise specified. And the artifacts that are created will be outputted at the end of the build. ## Options -* `-color=false` - Disables colorized output. Enabled by default. +- `-color=false` - Disables colorized output. Enabled by default. -* `-debug` - Disables parallelization and enables debug mode. Debug mode flags +- `-debug` - Disables parallelization and enables debug mode. Debug mode flags the builders that they should output debugging information. The exact behavior of debug mode is left to the builder. In general, builders usually will stop - between each step, waiting for keyboard input before continuing. This will allow - the user to inspect state and so on. + between each step, waiting for keyboard input before continuing. This will + allow the user to inspect state and so on. -* `-except=foo,bar,baz` - Builds all the builds except those with the given +- `-except=foo,bar,baz` - Builds all the builds except those with the given comma-separated names. Build names by default are the names of their builders, unless a specific `name` attribute is specified within the configuration. -* `-force` - Forces a builder to run when artifacts from a previous build prevent - a build from running. The exact behavior of a forced build is left to the builder. - In general, a builder supporting the forced build will remove the artifacts from - the previous build. This will allow the user to repeat a build without having to - manually clean these artifacts beforehand. +- `-force` - Forces a builder to run when artifacts from a previous build + prevent a build from running. The exact behavior of a forced build is left to + the builder. In general, a builder supporting the forced build will remove the + artifacts from the previous build. This will allow the user to repeat a build + without having to manually clean these artifacts beforehand. -* `-only=foo,bar,baz` - Only build the builds with the given comma-separated - names. 
Build names by default are the names of their builders, unless a
-  specific `name` attribute is specified within the configuration.
+- `-only=foo,bar,baz` - Only build the builds with the given
+  comma-separated names. Build names by default are the names of their builders,
+  unless a specific `name` attribute is specified within the configuration.
diff --git a/website/source/docs/command-line/fix.html.markdown b/website/source/docs/command-line/fix.html.markdown
index 958ebd0f8..eb383fec6 100644
--- a/website/source/docs/command-line/fix.html.markdown
+++ b/website/source/docs/command-line/fix.html.markdown
@@ -1,33 +1,34 @@
 ---
-layout: "docs"
-page_title: "Fix - Command-Line"
-description: |-
-  The `packer fix` Packer command takes a template and finds backwards incompatible parts of it and brings it up to date so it can be used with the latest version of Packer. After you update to a new Packer release, you should run the fix command to make sure your templates work with the new release.
----
+description: |
+    The `packer fix` Packer command takes a template and finds backwards
+    incompatible parts of it and brings it up to date so it can be used with the
+    latest version of Packer. After you update to a new Packer release, you should
+    run the fix command to make sure your templates work with the new release.
+layout: docs
+page_title: 'Fix - Command-Line'
+...

 # Command-Line: Fix

-The `packer fix` Packer command takes a template and finds backwards incompatible
-parts of it and brings it up to date so it can be used with the latest version
-of Packer. After you update to a new Packer release, you should run the
-fix command to make sure your templates work with the new release.
+The `packer fix` Packer command takes a template and finds backwards
+incompatible parts of it and brings it up to date so it can be used with the
+latest version of Packer. After you update to a new Packer release, you should
+run the fix command to make sure your templates work with the new release.

-The fix command will output the changed template to standard out, so you
-should redirect standard using standard OS-specific techniques if you want to
-save it to a file. For example, on Linux systems, you may want to do this:
+The fix command will output the changed template to standard out, so you should
+redirect standard output using standard OS-specific techniques if you want to
+save it to a file. For example, on Linux systems, you may want to do this:

-```
-$ packer fix old.json > new.json
-```
+    $ packer fix old.json > new.json

-If fixing fails for any reason, the fix command will exit with a non-zero
-exit status. Error messages appear on standard error, so if you're redirecting
+If fixing fails for any reason, the fix command will exit with a non-zero exit
+status. Error messages appear on standard error, so if you're redirecting
 output, you'll still see error messages.

--> **Even when Packer fix doesn't do anything** to the template,
-the template will be outputted to standard out. Things such as configuration
-key ordering and indentation may be changed. The output format however, is
-pretty-printed for human readability.
+-> **Even when Packer fix doesn't do anything** to the template, the template
+will be outputted to standard out. Things such as configuration key ordering and
+indentation may be changed. The output format, however, is pretty-printed for
+human readability.

-The full list of fixes that the fix command performs is visible in the
-help output, which can be seen via `packer fix -h`.
+The full list of fixes that the fix command performs is visible in the help +output, which can be seen via `packer fix -h`. diff --git a/website/source/docs/command-line/inspect.html.markdown b/website/source/docs/command-line/inspect.html.markdown index 09f979208..a1a86e3e5 100644 --- a/website/source/docs/command-line/inspect.html.markdown +++ b/website/source/docs/command-line/inspect.html.markdown @@ -1,33 +1,35 @@ --- -layout: "docs" -page_title: "Inspect - Command-Line" -description: |- - The `packer inspect` Packer command takes a template and outputs the various components a template defines. This can help you quickly learn about a template without having to dive into the JSON itself. The command will tell you things like what variables a template accepts, the builders it defines, the provisioners it defines and the order they'll run, and more. ---- +description: | + The `packer inspect` Packer command takes a template and outputs the various + components a template defines. This can help you quickly learn about a template + without having to dive into the JSON itself. The command will tell you things + like what variables a template accepts, the builders it defines, the + provisioners it defines and the order they'll run, and more. +layout: docs +page_title: 'Inspect - Command-Line' +... # Command-Line: Inspect -The `packer inspect` Packer command takes a template and outputs the various components -a template defines. This can help you quickly learn about a template without -having to dive into the JSON itself. -The command will tell you things like what variables a template accepts, -the builders it defines, the provisioners it defines and the order they'll -run, and more. +The `packer inspect` Packer command takes a template and outputs the various +components a template defines. This can help you quickly learn about a template +without having to dive into the JSON itself. The command will tell you things +like what variables a template accepts, the builders it defines, the +provisioners it defines and the order they'll run, and more. -This command is extra useful when used with -[machine-readable output](/docs/command-line/machine-readable.html) enabled. -The command outputs the components in a way that is parseable by machines. +This command is extra useful when used with [machine-readable +output](/docs/command-line/machine-readable.html) enabled. The command outputs +the components in a way that is parseable by machines. -The command doesn't validate the actual configuration of the various -components (that is what the `validate` command is for), but it will -validate the syntax of your template by necessity. +The command doesn't validate the actual configuration of the various components +(that is what the `validate` command is for), but it will validate the syntax of +your template by necessity. 
## Usage Example -Given a basic template, here is an example of what the output might -look like: +Given a basic template, here is an example of what the output might look like: -```text +``` {.text} $ packer inspect template.json Variables and their defaults: diff --git a/website/source/docs/command-line/introduction.html.markdown b/website/source/docs/command-line/introduction.html.markdown index 0a97e0056..ea9834397 100644 --- a/website/source/docs/command-line/introduction.html.markdown +++ b/website/source/docs/command-line/introduction.html.markdown @@ -1,24 +1,27 @@ --- -layout: "docs" -page_title: "Packer Command-Line" -description: |- - Packer is controlled using a command-line interface. All interaction with Packer is done via the `packer` tool. Like many other command-line tools, the `packer` tool takes a subcommand to execute, and that subcommand may have additional options as well. Subcommands are executed with `packer SUBCOMMAND`, where "SUBCOMMAND" is obviously the actual command you wish to execute. ---- +description: | + Packer is controlled using a command-line interface. All interaction with Packer + is done via the `packer` tool. Like many other command-line tools, the `packer` + tool takes a subcommand to execute, and that subcommand may have additional + options as well. Subcommands are executed with `packer SUBCOMMAND`, where + "SUBCOMMAND" is obviously the actual command you wish to execute. +layout: docs +page_title: 'Packer Command-Line' +... # Packer Command-Line -Packer is controlled using a command-line interface. All interaction with -Packer is done via the `packer` tool. Like many other command-line tools, -the `packer` tool takes a subcommand to execute, and that subcommand may -have additional options as well. Subcommands are executed with -`packer SUBCOMMAND`, where "SUBCOMMAND" is obviously the actual command you wish -to execute. +Packer is controlled using a command-line interface. All interaction with Packer +is done via the `packer` tool. Like many other command-line tools, the `packer` +tool takes a subcommand to execute, and that subcommand may have additional +options as well. Subcommands are executed with `packer SUBCOMMAND`, where +"SUBCOMMAND" is obviously the actual command you wish to execute. If you run `packer` by itself, help will be displayed showing all available subcommands and a brief synopsis of what they do. In addition to this, you can -run any `packer` command with the `-h` flag to output more detailed help for -a specific subcommand. +run any `packer` command with the `-h` flag to output more detailed help for a +specific subcommand. -In addition to the documentation available on the command-line, each command -is documented on this website. You can find the documentation for a specific +In addition to the documentation available on the command-line, each command is +documented on this website. You can find the documentation for a specific subcommand using the navigation to the left. diff --git a/website/source/docs/command-line/machine-readable.html.markdown b/website/source/docs/command-line/machine-readable.html.markdown index 5fed33310..550a14f35 100644 --- a/website/source/docs/command-line/machine-readable.html.markdown +++ b/website/source/docs/command-line/machine-readable.html.markdown @@ -1,30 +1,33 @@ --- -layout: "docs" -page_title: "Machine-Readable Output - Command-Line" -description: |- - By default, the output of Packer is very human-readable. 
It uses nice formatting, spacing, and colors in order to make Packer a pleasure to use. However, Packer was built with automation in mind. To that end, Packer supports a fully machine-readable output setting, allowing you to use Packer in automated environments. ---- +description: | + By default, the output of Packer is very human-readable. It uses nice + formatting, spacing, and colors in order to make Packer a pleasure to use. + However, Packer was built with automation in mind. To that end, Packer supports + a fully machine-readable output setting, allowing you to use Packer in automated + environments. +layout: docs +page_title: 'Machine-Readable Output - Command-Line' +... # Machine-Readable Output By default, the output of Packer is very human-readable. It uses nice formatting, spacing, and colors in order to make Packer a pleasure to use. -However, Packer was built with automation in mind. To that end, Packer -supports a fully machine-readable output setting, allowing you to use -Packer in automated environments. +However, Packer was built with automation in mind. To that end, Packer supports +a fully machine-readable output setting, allowing you to use Packer in automated +environments. -The machine-readable output format is easy to use and read and was made -with Unix tools in mind, so it is awk/sed/grep/etc. friendly. +The machine-readable output format is easy to use and read and was made with +Unix tools in mind, so it is awk/sed/grep/etc. friendly. ## Enabling The machine-readable output format can be enabled by passing the -`-machine-readable` flag to any Packer command. This immediately enables -all output to become machine-readable on stdout. Logging, if enabled, -continues to appear on stderr. An example of the output is shown -below: +`-machine-readable` flag to any Packer command. This immediately enables all +output to become machine-readable on stdout. Logging, if enabled, continues to +appear on stderr. An example of the output is shown below: -```text +``` {.text} $ packer -machine-readable version 1376289459,,version,0.2.4 1376289459,,version-prerelease, @@ -32,54 +35,50 @@ $ packer -machine-readable version 1376289459,,ui,say,Packer v0.2.4.dev (eed6ece+CHANGES) ``` -The format will be covered in more detail later. But as you can see, -the output immediately becomes machine-friendly. Try some other commands -with the `-machine-readable` flag to see! +The format will be covered in more detail later. But as you can see, the output +immediately becomes machine-friendly. Try some other commands with the +`-machine-readable` flag to see! ## Format -The machine readable format is a line-oriented, comma-delimited text -format. This makes it extremely easy to parse using standard Unix tools such -as awk or grep in addition to full programming languages like Ruby or -Python. +The machine readable format is a line-oriented, comma-delimited text format. +This makes it extremely easy to parse using standard Unix tools such as awk or +grep in addition to full programming languages like Ruby or Python. The format is: -```text +``` {.text} timestamp,target,type,data... ``` Each component is explained below: -* **timestamp** is a Unix timestamp in UTC of when the message was - printed. +- **timestamp** is a Unix timestamp in UTC of when the message was printed. -* **target** is the target of the following output. This is empty if - the message is related to Packer globally. 
Otherwise, this is generally
-  a build name so you can relate output to a specific build while parallel
-  builds are running.
+- **target** is the target of the following output. This is empty if the message
+  is related to Packer globally. Otherwise, this is generally a build name so
+  you can relate output to a specific build while parallel builds are running.

-* **type** is the type of machine-readable message being outputted. There
-  are a set of standard types which are covered later, but each component
-  of Packer (builders, provisioners, etc.) may output their own custom types
-  as well, allowing the machine-readable output to be infinitely flexible.
+- **type** is the type of machine-readable message being outputted. There is a
+  set of standard types which are covered later, but each component of Packer
+  (builders, provisioners, etc.) may output their own custom types as well,
+  allowing the machine-readable output to be infinitely flexible.

-* **data** is zero or more comma-seperated values associated with the prior
-  type. The exact amount and meaning of this data is type-dependent, so you
-  must read the documentation associated with the type to understand fully.
+- **data** is zero or more comma-separated values associated with the
+  prior type. The exact amount and meaning of this data is type-dependent, so
+  you must read the documentation associated with the type to understand fully.

 Within the format, if data contains a comma, it is replaced with
-`%!(PACKER_COMMA)`. This was preferred over an escape character such as
-`\'` because it is more friendly to tools like awk.
+`%!(PACKER_COMMA)`. This was preferred over an escape character such as `\'`
+because it is more friendly to tools like awk.

-Newlines within the format are replaced with their respective standard
-escape sequence. Newlines become a literal `\n` within the output. Carriage
-returns become a literal `\r`.
+Newlines within the format are replaced with their respective standard escape
+sequence. Newlines become a literal `\n` within the output. Carriage returns
+become a literal `\r`.

 ## Message Types

-The set of machine-readable message types can be found in the
-[machine-readable format](/docs/machine-readable/index.html)
-complete documentation section. This section contains documentation
-on all the message types exposed by Packer core as well as all the
-components that ship with Packer by default.
+The set of machine-readable message types can be found in the [machine-readable
+format](/docs/machine-readable/index.html) complete documentation section. This
+section contains documentation on all the message types exposed by Packer core
+as well as all the components that ship with Packer by default.
diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown
index 5833b917f..0cc9699f5 100644
--- a/website/source/docs/command-line/push.html.markdown
+++ b/website/source/docs/command-line/push.html.markdown
@@ -1,9 +1,10 @@
 ---
-layout: "docs"
-page_title: "Push - Command-Line"
-description: |-
-  The `packer push` Packer command takes a template and pushes it to a build service that will automatically build this Packer template.
----
+description: |
+    The `packer push` Packer command takes a template and pushes it to a build
+    service that will automatically build this Packer template.
+layout: docs
+page_title: 'Push - Command-Line'
+...
# Command-Line: Push @@ -16,36 +17,36 @@ External build services such as HashiCorp's Atlas make it easy to iterate on Packer templates, especially when the builder you are running may not be easily accessable (such as developing `qemu` builders on Mac or Windows). -!> The Packer build service will receive the raw copy of your Packer template +!> The Packer build service will receive the raw copy of your Packer template when you push. **If you have sensitive data in your Packer template, you should move that data into Packer variables or environment variables!** -For the `push` command to work, the [push configuration](/docs/templates/push.html) -must be completed within the template. +For the `push` command to work, the [push +configuration](/docs/templates/push.html) must be completed within the template. ## Options -* `-message` - A message to identify the purpose or changes in this Packer +- `-message` - A message to identify the purpose or changes in this Packer template much like a VCS commit message. This message will be passed to the Packer build service. This option is also available as a short option `-m`. -* `-token` - An access token for authenticating the push to the Packer build +- `-token` - An access token for authenticating the push to the Packer build service such as Atlas. This can also be specified within the push configuration in the template. -* `-name` - The name of the build in the service. This typically - looks like `hashicorp/precise64`. +- `-name` - The name of the build in the service. This typically looks like + `hashicorp/precise64`. ## Examples Push a Packer template: -```shell +``` {.shell} $ packer push -m "Updating the apache version" template.json ``` Push a Packer template with a custom token: -```shell +``` {.shell} $ packer push -token ABCD1234 template.json ``` diff --git a/website/source/docs/command-line/validate.html.markdown b/website/source/docs/command-line/validate.html.markdown index 530f00295..e17f23dc4 100644 --- a/website/source/docs/command-line/validate.html.markdown +++ b/website/source/docs/command-line/validate.html.markdown @@ -1,20 +1,24 @@ --- -layout: "docs" -page_title: "Validate - Command-Line" -description: |- - The `packer validate` Packer command is used to validate the syntax and configuration of a template. The command will return a zero exit status on success, and a non-zero exit status on failure. Additionally, if a template doesn't validate, any error messages will be outputted. ---- +description: | + The `packer validate` Packer command is used to validate the syntax and + configuration of a template. The command will return a zero exit status on + success, and a non-zero exit status on failure. Additionally, if a template + doesn't validate, any error messages will be outputted. +layout: docs +page_title: 'Validate - Command-Line' +... # Command-Line: Validate -The `packer validate` Packer command is used to validate the syntax and configuration -of a [template](/docs/templates/introduction.html). The command will return -a zero exit status on success, and a non-zero exit status on failure. Additionally, -if a template doesn't validate, any error messages will be outputted. +The `packer validate` Packer command is used to validate the syntax and +configuration of a [template](/docs/templates/introduction.html). The command +will return a zero exit status on success, and a non-zero exit status on +failure. Additionally, if a template doesn't validate, any error messages will +be outputted. 
Example usage: -```text +``` {.text} $ packer validate my-template.json Template validation failed. Errors are shown below. @@ -25,5 +29,5 @@ Errors validating build 'vmware'. 1 error(s) occurred: ## Options -* `-syntax-only` - Only the syntax of the template is checked. The configuration +- `-syntax-only` - Only the syntax of the template is checked. The configuration is not validated. diff --git a/website/source/docs/extend/builder.html.markdown b/website/source/docs/extend/builder.html.markdown index a841d5c3d..41a83bcef 100644 --- a/website/source/docs/extend/builder.html.markdown +++ b/website/source/docs/extend/builder.html.markdown @@ -1,167 +1,170 @@ --- -layout: "docs" -page_title: "Custom Builder - Extend Packer" -description: |- - Packer Builders are the components of Packer responsible for creating a machine, bringing it to a point where it can be provisioned, and then turning that provisioned machine into some sort of machine image. Several builders are officially distributed with Packer itself, such as the AMI builder, the VMware builder, etc. However, it is possible to write custom builders using the Packer plugin interface, and this page documents how to do that. ---- +description: | + Packer Builders are the components of Packer responsible for creating a machine, + bringing it to a point where it can be provisioned, and then turning that + provisioned machine into some sort of machine image. Several builders are + officially distributed with Packer itself, such as the AMI builder, the VMware + builder, etc. However, it is possible to write custom builders using the Packer + plugin interface, and this page documents how to do that. +layout: docs +page_title: 'Custom Builder - Extend Packer' +... # Custom Builder Development Packer Builders are the components of Packer responsible for creating a machine, -bringing it to a point where it can be provisioned, and then turning -that provisioned machine into some sort of machine image. Several builders -are officially distributed with Packer itself, such as the AMI builder, the -VMware builder, etc. However, it is possible to write custom builders using -the Packer plugin interface, and this page documents how to do that. +bringing it to a point where it can be provisioned, and then turning that +provisioned machine into some sort of machine image. Several builders are +officially distributed with Packer itself, such as the AMI builder, the VMware +builder, etc. However, it is possible to write custom builders using the Packer +plugin interface, and this page documents how to do that. -Prior to reading this page, it is assumed you have read the page on -[plugin development basics](/docs/extend/developing-plugins.html). +Prior to reading this page, it is assumed you have read the page on [plugin +development basics](/docs/extend/developing-plugins.html). -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## The Interface The interface that must be implemented for a builder is the `packer.Builder` -interface. It is reproduced below for easy reference. The actual interface -in the source code contains some basic documentation as well explaining -what each method should do. +interface. It is reproduced below for easy reference. The actual interface in +the source code contains some basic documentation as well explaining what each +method should do. 
-```go +``` {.go} type Builder interface { - Prepare(...interface{}) error - Run(ui Ui, hook Hook, cache Cache) (Artifact, error) - Cancel() + Prepare(...interface{}) error + Run(ui Ui, hook Hook, cache Cache) (Artifact, error) + Cancel() } ``` ### The "Prepare" Method -The `Prepare` method for each builder is called prior to any runs with -the configuration that was given in the template. This is passed in as -an array of `interface{}` types, but is generally `map[string]interface{}`. The prepare +The `Prepare` method for each builder is called prior to any runs with the +configuration that was given in the template. This is passed in as an array of +`interface{}` types, but is generally `map[string]interface{}`. The prepare method is responsible for translating this configuration into an internal structure, validating it, and returning any errors. For multiple parameters, they should be merged together into the final -configuration, with later parameters overwriting any previous configuration. -The exact semantics of the merge are left to the builder author. +configuration, with later parameters overwriting any previous configuration. The +exact semantics of the merge are left to the builder author. For decoding the `interface{}` into a meaningful structure, the -[mapstructure](https://github.com/mitchellh/mapstructure) library is recommended. -Mapstructure will take an `interface{}` and decode it into an arbitrarily -complex struct. If there are any errors, it generates very human friendly -errors that can be returned directly from the prepare method. +[mapstructure](https://github.com/mitchellh/mapstructure) library is +recommended. Mapstructure will take an `interface{}` and decode it into an +arbitrarily complex struct. If there are any errors, it generates very human +friendly errors that can be returned directly from the prepare method. -While it is not actively enforced, **no side effects** should occur from -running the `Prepare` method. Specifically, don't create files, don't launch -virtual machines, etc. Prepare's purpose is solely to configure the builder -and validate the configuration. +While it is not actively enforced, **no side effects** should occur from running +the `Prepare` method. Specifically, don't create files, don't launch virtual +machines, etc. Prepare's purpose is solely to configure the builder and validate +the configuration. -In addition to normal configuration, Packer will inject a `map[string]interface{}` -with a key of `packer.DebugConfigKey` set to boolean `true` if debug mode -is enabled for the build. If this is set to true, then the builder -should enable a debug mode which assists builder developers and advanced -users to introspect what is going on during a build. During debug -builds, parallelism is strictly disabled, so it is safe to request input -from stdin and so on. +In addition to normal configuration, Packer will inject a +`map[string]interface{}` with a key of `packer.DebugConfigKey` set to boolean +`true` if debug mode is enabled for the build. If this is set to true, then the +builder should enable a debug mode which assists builder developers and advanced +users to introspect what is going on during a build. During debug builds, +parallelism is strictly disabled, so it is safe to request input from stdin and +so on. ### The "Run" Method -`Run` is where all the interesting stuff happens. 
Run is executed, often -in parallel for multiple builders, to actually build the machine, provision -it, and create the resulting machine image, which is returned as an -implementation of the `packer.Artifact` interface. +`Run` is where all the interesting stuff happens. Run is executed, often in +parallel for multiple builders, to actually build the machine, provision it, and +create the resulting machine image, which is returned as an implementation of +the `packer.Artifact` interface. The `Run` method takes three parameters. These are all very useful. The -`packer.Ui` object is used to send output to the console. `packer.Hook` is -used to execute hooks, which are covered in more detail in the hook section -below. And `packer.Cache` is used to store files between multiple Packer -runs, and is covered in more detail in the cache section below. +`packer.Ui` object is used to send output to the console. `packer.Hook` is used +to execute hooks, which are covered in more detail in the hook section below. +And `packer.Cache` is used to store files between multiple Packer runs, and is +covered in more detail in the cache section below. Because builder runs are typically a complex set of many steps, the -[multistep](https://github.com/mitchellh/multistep) library is recommended -to bring order to the complexity. Multistep is a library which allows you to -separate your logic into multiple distinct "steps" and string them together. -It fully supports cancellation mid-step and so on. Please check it out, it is -how the built-in builders are all implemented. +[multistep](https://github.com/mitchellh/multistep) library is recommended to +bring order to the complexity. Multistep is a library which allows you to +separate your logic into multiple distinct "steps" and string them together. It +fully supports cancellation mid-step and so on. Please check it out, it is how +the built-in builders are all implemented. -Finally, as a result of `Run`, an implementation of `packer.Artifact` should -be returned. More details on creating a `packer.Artifact` are covered in the -artifact section below. If something goes wrong during the build, an error -can be returned, as well. Note that it is perfectly fine to produce no artifact -and no error, although this is rare. +Finally, as a result of `Run`, an implementation of `packer.Artifact` should be +returned. More details on creating a `packer.Artifact` are covered in the +artifact section below. If something goes wrong during the build, an error can +be returned, as well. Note that it is perfectly fine to produce no artifact and +no error, although this is rare. ### The "Cancel" Method -The `Run` method is often run in parallel. The `Cancel` method can be -called at any time and requests cancellation of any builder run in progress. -This method should block until the run actually stops. +The `Run` method is often run in parallel. The `Cancel` method can be called at +any time and requests cancellation of any builder run in progress. This method +should block until the run actually stops. -Cancels are most commonly triggered by external interrupts, such as the -user pressing `Ctrl-C`. Packer will only exit once all the builders clean up, -so it is important that you architect your builder in a way that it is quick -to respond to these cancellations and clean up after itself. +Cancels are most commonly triggered by external interrupts, such as the user +pressing `Ctrl-C`. 
Packer will only exit once all the builders clean up, so it +is important that you architect your builder in a way that it is quick to +respond to these cancellations and clean up after itself. ## Creating an Artifact The `Run` method is expected to return an implementation of the -`packer.Artifact` interface. Each builder must create their own -implementation. The interface is very simple and the documentation on the -interface is quite clear. +`packer.Artifact` interface. Each builder must create their own implementation. +The interface is very simple and the documentation on the interface is quite +clear. -The only part of an artifact that may be confusing is the `BuilderId` -method. This method must return an absolutely unique ID for the builder. -In general, I follow the practice of making the ID contain my GitHub username -and then the platform it is building for. For example, the builder ID of -the VMware builder is "mitchellh.vmware" or something similar. +The only part of an artifact that may be confusing is the `BuilderId` method. +This method must return an absolutely unique ID for the builder. In general, I +follow the practice of making the ID contain my GitHub username and then the +platform it is building for. For example, the builder ID of the VMware builder +is "mitchellh.vmware" or something similar. -Post-processors use the builder ID value in order to make some assumptions -about the artifact results, so it is important it never changes. +Post-processors use the builder ID value in order to make some assumptions about +the artifact results, so it is important it never changes. -Other than the builder ID, the rest should be self-explanatory by reading -the [packer.Artifact interface documentation](#). +Other than the builder ID, the rest should be self-explanatory by reading the +[packer.Artifact interface documentation](#). ## Provisioning Packer has built-in support for provisioning, but the moment when provisioning -runs must be invoked by the builder itself, since only the builder knows -when the machine is running and ready for communication. +runs must be invoked by the builder itself, since only the builder knows when +the machine is running and ready for communication. When the machine is ready to be provisioned, run the `packer.HookProvision` hook, making sure the communicator is not nil, since this is required for provisioners. An example of calling the hook is shown below: -```go +``` {.go} hook.Run(packer.HookProvision, ui, comm, nil) ``` -At this point, Packer will run the provisioners and no additional work -is necessary. +At this point, Packer will run the provisioners and no additional work is +necessary. --> **Note:** Hooks are still undergoing thought around their -general design and will likely change in a future version. They aren't -fully "baked" yet, so they aren't documented here other than to tell you -how to hook in provisioners. +-> **Note:** Hooks are still undergoing thought around their general design +and will likely change in a future version. They aren't fully "baked" yet, so +they aren't documented here other than to tell you how to hook in provisioners. ## Caching Files -It is common for some builders to deal with very large files, or files that -take a long time to generate. For example, the VMware builder has the capability -to download the operating system ISO from the internet. This is timely process, -so it would be convenient to cache the file. This sort of caching is a core -part of Packer that is exposed to builders. 
+It is common for some builders to deal with very large files, or files that take
+a long time to generate. For example, the VMware builder has the capability to
+download the operating system ISO from the internet. This is a time-consuming
+process, so it would be convenient to cache the file. This sort of caching is a
+core part of Packer that is exposed to builders.

 The cache interface is `packer.Cache`. It behaves much like a Go
-[RWMutex](http://golang.org/pkg/sync/#RWMutex). The builder requests a "lock"
-on certain cache keys, and is given exclusive access to that key for the
-duration of the lock. This locking mechanism allows multiple builders to
-share cache data even though they're running in parallel.
+[RWMutex](http://golang.org/pkg/sync/#RWMutex). The builder requests a "lock" on
+certain cache keys, and is given exclusive access to that key for the duration
+of the lock. This locking mechanism allows multiple builders to share cache data
+even though they're running in parallel.

 For example, both the VMware and VirtualBox builders support downloading an
 operating system ISO from the internet. Most of the time, this ISO is identical.
 The locking mechanisms of the cache allow one of the builders to download it
 only once, but allow both builders to share the downloaded file.

-The [documentation for packer.Cache](#) is
-very detailed in how it works.
+The [documentation for packer.Cache](#) is very detailed in how it works.
diff --git a/website/source/docs/extend/command.html.markdown b/website/source/docs/extend/command.html.markdown
index 1a4625c9b..2d611ebf4 100644
--- a/website/source/docs/extend/command.html.markdown
+++ b/website/source/docs/extend/command.html.markdown
@@ -1,57 +1,57 @@
 ---
-layout: "docs"
-page_title: "Custom Command Development"
-description: |-
-  Packer Commands are the components of Packer that add functionality to the `packer` application. Packer comes with a set of commands out of the box, such as `build`. Commands are invoked as `packer <COMMAND>`. Custom commands allow you to add new commands to Packer to perhaps perform new functionality.
----
+description: |
+    Packer Commands are the components of Packer that add functionality to the
+    `packer` application. Packer comes with a set of commands out of the box, such
+    as `build`. Commands are invoked as `packer <COMMAND>`. Custom commands allow
+    you to add new commands to Packer to perhaps perform new functionality.
+layout: docs
+page_title: Custom Command Development
+...

 # Custom Command Development

 Packer Commands are the components of Packer that add functionality to the
-`packer` application. Packer comes with a set of commands out of the
-box, such as `build`. Commands are invoked as `packer <COMMAND>`.
-Custom commands allow you to add new commands to Packer to perhaps
-perform new functionality.
+`packer` application. Packer comes with a set of commands out of the box, such
+as `build`. Commands are invoked as `packer <COMMAND>`. Custom commands allow
+you to add new commands to Packer to perhaps perform new functionality.

-Prior to reading this page, it is assumed you have read the page on
-[plugin development basics](/docs/extend/developing-plugins.html).
+Prior to reading this page, it is assumed you have read the page on [plugin
+development basics](/docs/extend/developing-plugins.html).

-Command plugins implement the `packer.Command` interface and are served
-using the `plugin.ServeCommand` function. Commands actually have no control
-over what keyword invokes the command with the `packer` binary.
The keyword -to invoke the command depends on how the plugin is installed and configured -in the core Packer configuration. +Command plugins implement the `packer.Command` interface and are served using +the `plugin.ServeCommand` function. Commands actually have no control over what +keyword invokes the command with the `packer` binary. The keyword to invoke the +command depends on how the plugin is installed and configured in the core Packer +configuration. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## The Interface The interface that must be implemented for a command is the `packer.Command` -interface. It is reproduced below for easy reference. The actual interface -in the source code contains some basic documentation as well explaining -what each method should do. +interface. It is reproduced below for easy reference. The actual interface in +the source code contains some basic documentation as well explaining what each +method should do. -```go +``` {.go} type Command interface { - Help() string - Run(env Environment, args []string) int - Synopsis() string + Help() string + Run(env Environment, args []string) int + Synopsis() string } ``` ### The "Help" Method -The `Help` method returns long-form help. This help is most commonly -shown when a command is invoked with the `--help` or `-h` option. -The help should document all the available command line flags, purpose -of the command, etc. +The `Help` method returns long-form help. This help is most commonly shown when +a command is invoked with the `--help` or `-h` option. The help should document +all the available command line flags, purpose of the command, etc. -Packer commands generally follow the following format for help, but -it is not required. You're allowed to make the help look like anything -you please. +Packer commands generally follow the following format for help, but it is not +required. You're allowed to make the help look like anything you please. -```text +``` {.text} Usage: packer COMMAND [options] ARGS... Brief one or two sentence about the function of the command. @@ -64,23 +64,23 @@ Options: ### The "Run" Method -`Run` is what is called when the command is actually invoked. It is given -the `packer.Environment`, which has access to almost all components of -the current Packer run, such as UI, builders, other plugins, etc. In addition -to the environment, the remaining command line args are given. These command -line args have already been stripped of the command name, so they can be -passed directly into something like the standard Go `flag` package for -command-line flag parsing. +`Run` is what is called when the command is actually invoked. It is given the +`packer.Environment`, which has access to almost all components of the current +Packer run, such as UI, builders, other plugins, etc. In addition to the +environment, the remaining command line args are given. These command line args +have already been stripped of the command name, so they can be passed directly +into something like the standard Go `flag` package for command-line flag +parsing. -The return value of `Run` is the exit status for the command. If everything -ran successfully, this should be 0. If any errors occurred, it should be any +The return value of `Run` is the exit status for the command. If everything ran +successfully, this should be 0. 
If any errors occurred, it should be any positive integer. ### The "Synopsis" Method -The `Synopsis` method should return a short single-line description -of what the command does. This is used when `packer` is invoked on its own -in order to show a brief summary of the commands that Packer supports. +The `Synopsis` method should return a short single-line description of what the +command does. This is used when `packer` is invoked on its own in order to show +a brief summary of the commands that Packer supports. -The synopsis should be no longer than around 50 characters, since it is -already appearing on a line with other text. +The synopsis should be no longer than around 50 characters, since it is already +appearing on a line with other text. diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown index 4e38c27a3..2ccdd437f 100644 --- a/website/source/docs/extend/developing-plugins.html.markdown +++ b/website/source/docs/extend/developing-plugins.html.markdown @@ -1,73 +1,75 @@ --- -layout: "docs" -page_title: "Developing Plugins" -description: |- - This page will document how you can develop your own Packer plugins. Prior to reading this, it is assumed that you're comfortable with Packer and also know the basics of how Plugins work, from a user standpoint. ---- +description: | + This page will document how you can develop your own Packer plugins. Prior to + reading this, it is assumed that you're comfortable with Packer and also know + the basics of how Plugins work, from a user standpoint. +layout: docs +page_title: Developing Plugins +... # Developing Plugins -This page will document how you can develop your own Packer plugins. -Prior to reading this, it is assumed that you're comfortable with Packer -and also know the [basics of how Plugins work](/docs/extend/plugins.html), -from a user standpoint. +This page will document how you can develop your own Packer plugins. Prior to +reading this, it is assumed that you're comfortable with Packer and also know +the [basics of how Plugins work](/docs/extend/plugins.html), from a user +standpoint. Packer plugins must be written in [Go](http://golang.org/), so it is also -assumed that you're familiar with the language. This page will not be a -Go language tutorial. Thankfully, if you are familiar with Go, the Go toolchain +assumed that you're familiar with the language. This page will not be a Go +language tutorial. Thankfully, if you are familiar with Go, the Go toolchain makes it extremely easy to develop Packer plugins. -~> **Warning!** This is an advanced topic. If you're new to Packer, we +\~> **Warning!** This is an advanced topic. If you're new to Packer, we recommend getting a bit more comfortable before you dive into writing plugins. ## Plugin System Architecture Packer has a fairly unique plugin architecture. Instead of loading plugins -directly into a running application, Packer runs each plugin as a -_separate application_. Inter-process communication and RPC is then used -to communicate between the many running Packer processes. Packer core -itself is responsible for orchestrating the processes and handles cleanup. +directly into a running application, Packer runs each plugin as a *separate +application*. Inter-process communication and RPC is then used to communicate +between the many running Packer processes. Packer core itself is responsible for +orchestrating the processes and handles cleanup. 
The beauty of this is that your plugin can have any dependencies it wants. Dependencies don't need to line up with what Packer core or any other plugin -uses, because they're completely isolated into the process space of the -plugin itself. +uses, because they're completely isolated into the process space of the plugin +itself. -And, thanks to Go's [interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types), -it doesn't even look like inter-process communication is occurring. You just -use the interfaces like normal, but in fact they're being executed in -a remote process. Pretty cool. +And, thanks to Go's +[interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types), it +doesn't even look like inter-process communication is occurring. You just use +the interfaces like normal, but in fact they're being executed in a remote +process. Pretty cool. ## Plugin Development Basics -Developing a plugin is quite simple. All the various kinds of plugins -have a corresponding interface. The plugin simply needs to implement -this interface and expose it using the Packer plugin package (covered here shortly), -and that's it! +Developing a plugin is quite simple. All the various kinds of plugins have a +corresponding interface. The plugin simply needs to implement this interface and +expose it using the Packer plugin package (covered here shortly), and that's it! -There are two packages that really matter that every plugin must use. -Other than the following two packages, you're encouraged to use whatever -packages you want. Because plugins are their own processes, there is -no danger of colliding dependencies. +There are two packages that really matter that every plugin must use. Other than +the following two packages, you're encouraged to use whatever packages you want. +Because plugins are their own processes, there is no danger of colliding +dependencies. -* `github.com/mitchellh/packer` - Contains all the interfaces that you - have to implement for any given plugin. +- `github.com/mitchellh/packer` - Contains all the interfaces that you have to + implement for any given plugin. -* `github.com/mitchellh/packer/plugin` - Contains the code to serve the - plugin. This handles all the inter-process communication stuff. +- `github.com/mitchellh/packer/plugin` - Contains the code to serve the plugin. + This handles all the inter-process communication stuff. There are two steps involved in creating a plugin: -1. Implement the desired interface. For example, if you're building a - builder plugin, implement the `packer.Builder` interface. +1. Implement the desired interface. For example, if you're building a builder + plugin, implement the `packer.Builder` interface. -2. Serve the interface by calling the appropriate plugin serving method - in your main method. In the case of a builder, this is `plugin.ServeBuilder`. +2. Serve the interface by calling the appropriate plugin serving method in your + main method. In the case of a builder, this is `plugin.ServeBuilder`. A basic example is shown below. In this example, assume the `Builder` struct implements the `packer.Builder` interface: -```go +``` {.go} import ( "github.com/mitchellh/packer/plugin" ) @@ -76,40 +78,38 @@ import ( type Builder struct{} func main() { - plugin.ServeBuilder(new(Builder)) + plugin.ServeBuilder(new(Builder)) } ``` **That's it!** `plugin.ServeBuilder` handles all the nitty gritty of -communicating with Packer core and serving your builder over RPC. It -can't get much easier than that. 
+communicating with Packer core and serving your builder over RPC. It can't get
+much easier than that.

-Next, just build your plugin like a normal Go application, using `go build`
-or however you please. The resulting binary is the plugin that can be
-installed using standard installation procedures.
+Next, just build your plugin like a normal Go application, using `go build` or
+however you please. The resulting binary is the plugin that can be installed
+using standard installation procedures.

-The specifics of how to implement each type of interface are covered
-in the relevant subsections available in the navigation to the left.
+The specifics of how to implement each type of interface are covered in the
+relevant subsections available in the navigation to the left.

-~> **Lock your dependencies!** Unfortunately, Go's dependency
-management story is fairly sad. There are various unofficial methods out
-there for locking dependencies, and using one of them is highly recommended
-since the Packer codebase will continue to improve, potentially breaking
-APIs along the way until there is a stable release. By locking your dependencies,
-your plugins will continue to work with the version of Packer you lock to.
+\~> **Lock your dependencies!** Unfortunately, Go's dependency management
+story is fairly sad. There are various unofficial methods out there for locking
+dependencies, and using one of them is highly recommended since the Packer
+codebase will continue to improve, potentially breaking APIs along the way until
+there is a stable release. By locking your dependencies, your plugins will
+continue to work with the version of Packer you lock to.

## Logging and Debugging

-Plugins can use the standard Go `log` package to log. Anything logged
-using this will be available in the Packer log files automatically.
-The Packer log is visible on stderr when the `PACKER_LOG` environmental
-is set.
+Plugins can use the standard Go `log` package to log. Anything logged using this
+will be available in the Packer log files automatically. The Packer log is
+visible on stderr when the `PACKER_LOG` environmental variable is set.

-Packer will prefix any logs from plugins with the path to that plugin
-to make it identifiable where the logs come from. Some example logs are
-shown below:
+Packer will prefix any logs from plugins with the path to that plugin to make it
+identifiable where the logs come from. Some example logs are shown below:

-```text
+``` {.text}
2013/06/10 21:44:43 ui: Available commands are:
2013/06/10 21:44:43 Loading command: build
2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin minimum port: 10000
@@ -117,31 +117,31 @@ shown below:
2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin address: :10000
```

-As you can see, the log messages from the "build" command plugin are
-prefixed with "packer-command-build". Log output is _extremely_ helpful
-in debugging issues and you're encouraged to be as verbose as you need to
-be in order for the logs to be helpful.
+As you can see, the log messages from the "build" command plugin are prefixed
+with "packer-command-build". Log output is *extremely* helpful in debugging
+issues and you're encouraged to be as verbose as you need to be in order for the
+logs to be helpful.
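For example, a plugin can write through the standard library logger anywhere
in its code and the output will show up in the Packer log as described above.
A minimal, self-contained sketch:

``` {.go}
package main

import (
	"log"
	"os"
)

func main() {
	// When PACKER_LOG is set, these lines appear in the Packer log,
	// prefixed with the path of this plugin binary.
	log.Printf("plugin starting, pid=%d", os.Getpid())
	log.Println("validating configuration")
}
```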
## Plugin Development Tips

-Here are some tips for developing plugins, often answering common questions
-or concerns.
+Here are some tips for developing plugins, often answering common questions or
+concerns.

### Naming Conventions

-It is standard practice to name the resulting plugin application
-in the format of `packer-TYPE-NAME`. For example, if you're building a
-new builder for CustomCloud, it would be standard practice to name the
-resulting plugin `packer-builder-custom-cloud`. This naming convention
-helps users identify the purpose of a plugin.
+It is standard practice to name the resulting plugin application in the format
+of `packer-TYPE-NAME`. For example, if you're building a new builder for
+CustomCloud, it would be standard practice to name the resulting plugin
+`packer-builder-custom-cloud`. This naming convention helps users identify the
+purpose of a plugin.

### Testing Plugins

-While developing plugins, you can configure your Packer configuration
-to point directly to the compiled plugin in order to test it. For example,
-building the CustomCloud plugin, I may configure packer like so:
+While developing plugins, you can configure your Packer configuration to point
+directly to the compiled plugin in order to test it. For example, building the
+CustomCloud plugin, I may configure packer like so:

-```javascript
+``` {.javascript}
{
  "builders": {
    "custom-cloud": "/an/absolute/path/to/packer-builder-custom-cloud"
@@ -149,13 +149,13 @@ building the CustomCloud plugin, I may configure packer like so:
  }
}
```

-This would configure Packer to have the "custom-cloud" plugin, and execute
-the binary that I am building during development. This is extremely useful
-during development.
+This would configure Packer to have the "custom-cloud" plugin, and execute the
+binary that I am building during development. This is extremely useful during
+development.

### Distributing Plugins

-It is recommended you use a tool like [goxc](https://github.com/laher/goxc)
-in order to cross-compile your plugin for every platform that Packer supports,
-since Go applications are platform-specific. goxc will allow you to build
-for every platform from your own computer.
+It is recommended you use a tool like [goxc](https://github.com/laher/goxc) in
+order to cross-compile your plugin for every platform that Packer supports,
+since Go applications are platform-specific. goxc will allow you to build for
+every platform from your own computer.
diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown
index c257fb702..f8b800a30 100644
--- a/website/source/docs/extend/plugins.html.markdown
+++ b/website/source/docs/extend/plugins.html.markdown
@@ -1,68 +1,71 @@
---
-layout: "docs"
-page_title: "Packer Plugins - Extend Packer"
-description: |-
-    Packer Plugins allow new functionality to be added to Packer without modifying the core source code. Packer plugins are able to add new commands, builders, provisioners, hooks, and more. In fact, much of Packer itself is implemented by writing plugins that are simply distributed with Packer. For example, all the commands, builders, provisioners, and more that ship with Packer are implemented as Plugins that are simply hardcoded to load with Packer.
----
+description: |
+  Packer Plugins allow new functionality to be added to Packer without modifying
+  the core source code. Packer plugins are able to add new commands, builders,
+  provisioners, hooks, and more. In fact, much of Packer itself is implemented by
+  writing plugins that are simply distributed with Packer. For example, all the
+  commands, builders, provisioners, and more that ship with Packer are implemented
+  as Plugins that are simply hardcoded to load with Packer.
+layout: docs
+page_title: 'Packer Plugins - Extend Packer'
+...

# Packer Plugins

-Packer Plugins allow new functionality to be added to Packer without
-modifying the core source code. Packer plugins are able to add new
-commands, builders, provisioners, hooks, and more. In fact, much of Packer
-itself is implemented by writing plugins that are simply distributed with
-Packer. For example, all the commands, builders, provisioners, and more
-that ship with Packer are implemented as Plugins that are simply hardcoded
-to load with Packer.
+Packer Plugins allow new functionality to be added to Packer without modifying
+the core source code. Packer plugins are able to add new commands, builders,
+provisioners, hooks, and more. In fact, much of Packer itself is implemented by
+writing plugins that are simply distributed with Packer. For example, all the
+commands, builders, provisioners, and more that ship with Packer are implemented
+as Plugins that are simply hardcoded to load with Packer.

-This page will cover how to install and use plugins. If you're interested
-in developing plugins, the documentation for that is available the
-[developing plugins](/docs/extend/developing-plugins.html) page.
+This page will cover how to install and use plugins. If you're interested in
+developing plugins, the documentation for that is available on the [developing
+plugins](/docs/extend/developing-plugins.html) page.

-Because Packer is so young, there is no official listing of available
-Packer plugins. Plugins are best found via Google. Typically, searching
-"packer plugin _x_" will find what you're looking for if it exists. As
-Packer gets older, an official plugin directory is planned.
+Because Packer is so young, there is no official listing of available Packer
+plugins. Plugins are best found via Google. Typically, searching "packer plugin
+*x*" will find what you're looking for if it exists. As Packer gets older, an
+official plugin directory is planned.

## How Plugins Work

-Packer plugins are completely separate, standalone applications that the
-core of Packer starts and communicates with.
+Packer plugins are completely separate, standalone applications that the core of
+Packer starts and communicates with.

-These plugin applications aren't meant to be run manually. Instead, Packer core executes
-these plugin applications in a certain way and communicates with them.
+These plugin applications aren't meant to be run manually. Instead, Packer core
+executes these plugin applications in a certain way and communicates with them.

For example, the VMware builder is actually a standalone binary named
-`packer-builder-vmware`. The next time you run a Packer build, look at
-your process list and you should see a handful of `packer-` prefixed
-applications running.
+`packer-builder-vmware`. The next time you run a Packer build, look at your
+process list and you should see a handful of `packer-` prefixed applications
+running.

## Installing Plugins

-The easiest way to install a plugin is to name it correctly, then place
-it in the proper directory. To name a plugin correctly, make sure the
-binary is named `packer-TYPE-NAME`. For example, `packer-builder-amazon-ebs`
-for a "builder" type plugin named "amazon-ebs". Valid types for plugins
-are down this page more.
+The easiest way to install a plugin is to name it correctly, then place it in
+the proper directory. To name a plugin correctly, make sure the binary is named
+`packer-TYPE-NAME`. For example, `packer-builder-amazon-ebs` for a "builder"
+type plugin named "amazon-ebs". Valid types for plugins are listed further
+down this page.

-Once the plugin is named properly, Packer automatically discovers plugins
-in the following directories in the given order. If a conflicting plugin is
-found later, it will take precedence over one found earlier.
+Once the plugin is named properly, Packer automatically discovers plugins in the
+following directories in the given order. If a conflicting plugin is found
+later, it will take precedence over one found earlier.

-1. The directory where `packer` is, or the executable directory.
+1. The directory where `packer` is, or the executable directory.

-2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on
-   Windows.
+2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins`
+    on Windows.

-3. The current working directory.
+3. The current working directory.

The valid types for plugins are:

-* `builder` - Plugins responsible for building images for a specific platform.
+- `builder` - Plugins responsible for building images for a specific platform.

-* `command` - A CLI sub-command for `packer`.
+- `command` - A CLI sub-command for `packer`.

-* `post-processor` - A post-processor responsible for taking an artifact
-  from a builder and turning it into something else.
+- `post-processor` - A post-processor responsible for taking an artifact from a
+  builder and turning it into something else.

-* `provisioner` - A provisioner to install software on images created by
-  a builder.
+- `provisioner` - A provisioner to install software on images created by
+  a builder.
diff --git a/website/source/docs/extend/post-processor.html.markdown b/website/source/docs/extend/post-processor.html.markdown
index 204cc593b..1120bc31d 100644
--- a/website/source/docs/extend/post-processor.html.markdown
+++ b/website/source/docs/extend/post-processor.html.markdown
@@ -1,92 +1,89 @@
---
-layout: "docs"
-page_title: "Custom Post-Processor Development"
-description: |-
-    Packer Post-processors are the components of Packer that transform one artifact into another, for example by compressing files, or uploading them.
----
+description: |
+  Packer Post-processors are the components of Packer that transform one artifact
+  into another, for example by compressing files, or uploading them.
+layout: docs
+page_title: 'Custom Post-Processor Development'
+...

# Custom Post-Processor Development

Packer Post-processors are the components of Packer that transform one artifact
into another, for example by compressing files, or uploading them.

-In the compression example, the transformation would be taking an artifact
-with a set of files, compressing those files, and returning a new
-artifact with only a single file (the compressed archive). For the
-upload example, the transformation would be taking an artifact with
-some set of files, uploading those files, and returning an artifact
-with a single ID: the URL of the upload.
+In the compression example, the transformation would be taking an artifact with
+a set of files, compressing those files, and returning a new artifact with only
+a single file (the compressed archive).
For the upload example, the
+transformation would be taking an artifact with some set of files, uploading
+those files, and returning an artifact with a single ID: the URL of the upload.

-Prior to reading this page, it is assumed you have read the page on
-[plugin development basics](/docs/extend/developing-plugins.html).
+Prior to reading this page, it is assumed you have read the page on [plugin
+development basics](/docs/extend/developing-plugins.html).

-Post-processor plugins implement the `packer.PostProcessor` interface and
-are served using the `plugin.ServePostProcessor` function.
+Post-processor plugins implement the `packer.PostProcessor` interface and are
+served using the `plugin.ServePostProcessor` function.

-~> **Warning!** This is an advanced topic. If you're new to Packer, we
+\~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.

-
## The Interface

The interface that must be implemented for a post-processor is the
-`packer.PostProcessor` interface. It is reproduced below for easy reference.
-The actual interface in the source code contains some basic documentation as well explaining
-what each method should do.
+`packer.PostProcessor` interface. It is reproduced below for easy reference. The
+actual interface in the source code contains some basic documentation as well,
+explaining what each method should do.

-```go
+``` {.go}
type PostProcessor interface {
-    Configure(interface{}) error
-    PostProcess(Ui, Artifact) (a Artifact, keep bool, err error)
+    Configure(interface{}) error
+    PostProcess(Ui, Artifact) (a Artifact, keep bool, err error)
}
```

### The "Configure" Method

-The `Configure` method for each post-processor is called early in the
-build process to configure the post-processor. The configuration is passed
-in as a raw `interface{}`. The configure method is responsible for translating
-this configuration into an internal structure, validating it, and returning
-any errors.
+The `Configure` method for each post-processor is called early in the build
+process to configure the post-processor. The configuration is passed in as a raw
+`interface{}`. The configure method is responsible for translating this
+configuration into an internal structure, validating it, and returning any
+errors.

For decoding the `interface{}` into a meaningful structure, the
[mapstructure](https://github.com/mitchellh/mapstructure) library is
recommended. Mapstructure will take an `interface{}` and decode it into an
arbitrarily complex struct. If there are any errors, it generates very
-human-friendly errors that can be returned directly from the configure
-method.
+human-friendly errors that can be returned directly from the configure method.

-While it is not actively enforced, **no side effects** should occur from
-running the `Configure` method. Specifically, don't create files, don't
-create network connections, etc. Configure's purpose is solely to setup
-internal state and validate the configuration as much as possible.
+While it is not actively enforced, **no side effects** should occur from running
+the `Configure` method. Specifically, don't create files, don't create network
+connections, etc. Configure's purpose is solely to set up internal state and
+validate the configuration as much as possible.

-`Configure` being run is not an indication that `PostProcess` will ever
-run.
For example, `packer validate` will run `Configure` to verify the
-configuration validates, but will never actually run the build.
+`Configure` being run is not an indication that `PostProcess` will ever run. For
+example, `packer validate` will run `Configure` to verify the configuration
+validates, but will never actually run the build.

### The "PostProcess" Method

-The `PostProcess` method is where the real work goes. PostProcess is
-responsible for taking one `packer.Artifact` implementation, and transforming
-it into another.
+The `PostProcess` method is where the real work goes. PostProcess is responsible
+for taking one `packer.Artifact` implementation, and transforming it into
+another.

When we say "transform," we don't mean actually modifying the existing
-`packer.Artifact` value itself. We mean taking the contents of the artifact
-and creating a new artifact from that. For example, if we were creating
-a "compress" post-processor that is responsible for compressing files,
-the transformation would be taking the `Files()` from the original artifact,
-compressing them, and creating a new artifact with a single file: the
-compressed archive.
+`packer.Artifact` value itself. We mean taking the contents of the artifact and
+creating a new artifact from that. For example, if we were creating a "compress"
+post-processor that is responsible for compressing files, the transformation
+would be taking the `Files()` from the original artifact, compressing them, and
+creating a new artifact with a single file: the compressed archive.

-The result signature of this method is `(Artifact, bool, error)`. Each
-return value is explained below:
+The result signature of this method is `(Artifact, bool, error)`. Each return
+value is explained below:

-* `Artifact` - The newly created artifact if no errors occurred.
-* `bool` - If true, the input artifact will forcefully be kept. By default,
+- `Artifact` - The newly created artifact if no errors occurred.
+- `bool` - If true, the input artifact will forcefully be kept. By default,
  Packer typically deletes all input artifacts, since the user doesn't
  generally want intermediary artifacts. However, some post-processors depend
  on the previous artifact existing. If this is `true`, it forces packer to
  keep the artifact around.
-* `error` - Non-nil if there was an error in any way. If this is the case,
-  the other two return values are ignored.
+- `error` - Non-nil if there was an error in any way. If this is the case, the
+  other two return values are ignored.
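To make the shape of these two methods concrete, below is a minimal sketch of
a post-processor in the spirit of the compress example above. The `output`
option, the struct names, and the elided compression step are illustrative
only; a real implementation would return a new artifact describing the archive
instead of the input artifact:

``` {.go}
import (
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/mitchellh/packer/packer"
)

type Config struct {
	OutputPath string `mapstructure:"output"`
}

type CompressPostProcessor struct {
	config Config
}

func (p *CompressPostProcessor) Configure(raw interface{}) error {
	// Translate and validate the raw configuration. No side effects here.
	if err := mapstructure.Decode(raw, &p.config); err != nil {
		return err
	}
	if p.config.OutputPath == "" {
		return fmt.Errorf("output is required")
	}
	return nil
}

func (p *CompressPostProcessor) PostProcess(ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, error) {
	ui.Say(fmt.Sprintf("Compressing %d files into %s", len(a.Files()), p.config.OutputPath))
	// ... compress a.Files() into p.config.OutputPath, then build and
	// return a new packer.Artifact describing the archive (elided) ...
	return a, false, nil
}
```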
diff --git a/website/source/docs/extend/provisioner.html.markdown b/website/source/docs/extend/provisioner.html.markdown
index cb73cccd2..a06940dac 100644
--- a/website/source/docs/extend/provisioner.html.markdown
+++ b/website/source/docs/extend/provisioner.html.markdown
@@ -1,90 +1,95 @@
---
-layout: "docs"
-page_title: "Custom Provisioner Development"
-description: |-
-    Packer Provisioners are the components of Packer that install and configure software into a running machine prior to turning that machine into an image. An example of a provisioner is the shell provisioner, which runs shell scripts within the machines.
----
+description: |
+  Packer Provisioners are the components of Packer that install and configure
+  software into a running machine prior to turning that machine into an image. An
+  example of a provisioner is the shell provisioner, which runs shell scripts
+  within the machines.
+layout: docs
+page_title: Custom Provisioner Development
+...

# Custom Provisioner Development

Packer Provisioners are the components of Packer that install and configure
-software into a running machine prior to turning that machine into an
-image. An example of a provisioner is the [shell provisioner](/docs/provisioners/shell.html),
-which runs shell scripts within the machines.
+software into a running machine prior to turning that machine into an image. An
+example of a provisioner is the [shell
+provisioner](/docs/provisioners/shell.html), which runs shell scripts within the
+machines.

-Prior to reading this page, it is assumed you have read the page on
-[plugin development basics](/docs/extend/developing-plugins.html).
+Prior to reading this page, it is assumed you have read the page on [plugin
+development basics](/docs/extend/developing-plugins.html).

-Provisioner plugins implement the `packer.Provisioner` interface and
-are served using the `plugin.ServeProvisioner` function.
+Provisioner plugins implement the `packer.Provisioner` interface and are served
+using the `plugin.ServeProvisioner` function.

-~> **Warning!** This is an advanced topic. If you're new to Packer, we
+\~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.

## The Interface

The interface that must be implemented for a provisioner is the
-`packer.Provisioner` interface. It is reproduced below for easy reference.
-The actual interface in the source code contains some basic documentation as well explaining
-what each method should do.
+`packer.Provisioner` interface. It is reproduced below for easy reference. The
+actual interface in the source code contains some basic documentation as well,
+explaining what each method should do.

-```go
+``` {.go}
type Provisioner interface {
-    Prepare(...interface{}) error
-    Provision(Ui, Communicator) error
+    Prepare(...interface{}) error
+    Provision(Ui, Communicator) error
}
```

### The "Prepare" Method

-The `Prepare` method for each provisioner is called prior to any runs with
-the configuration that was given in the template. This is passed in as
-an array of `interface{}` types, but is generally `map[string]interface{}`. The prepare
+The `Prepare` method for each provisioner is called prior to any runs with the
+configuration that was given in the template. This is passed in as an array of
+`interface{}` types, but is generally `map[string]interface{}`. The prepare
method is responsible for translating this configuration into an internal
structure, validating it, and returning any errors.

For multiple parameters, they should be merged together into the final
-configuration, with later parameters overwriting any previous configuration.
-The exact semantics of the merge are left to the builder author.
+configuration, with later parameters overwriting any previous configuration. The
+exact semantics of the merge are left to the builder author.

For decoding the `interface{}` into a meaningful structure, the
-[mapstructure](https://github.com/mitchellh/mapstructure) library is recommended.
-Mapstructure will take an `interface{}` and decode it into an arbitrarily
-complex struct. If there are any errors, it generates very human friendly
-errors that can be returned directly from the prepare method.
+[mapstructure](https://github.com/mitchellh/mapstructure) library is
+recommended. Mapstructure will take an `interface{}` and decode it into an
+arbitrarily complex struct. If there are any errors, it generates very
+human-friendly errors that can be returned directly from the prepare method.
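For example, a `Prepare` implementation might decode every raw parameter into
the same struct in order, so that later parameters overwrite earlier ones. A
sketch, with a hypothetical `command` option:

``` {.go}
import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Command string `mapstructure:"command"`
}

type Provisioner struct {
	config Config
}

func (p *Provisioner) Prepare(raws ...interface{}) error {
	// Decoding each parameter into the same struct gives simple
	// "last one wins" merge semantics.
	for _, raw := range raws {
		if err := mapstructure.Decode(raw, &p.config); err != nil {
			return err
		}
	}
	if p.config.Command == "" {
		return fmt.Errorf("command is required")
	}
	return nil
}
```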

-While it is not actively enforced, **no side effects** should occur from
-running the `Prepare` method. Specifically, don't create files, don't launch
-virtual machines, etc. Prepare's purpose is solely to configure the builder
-and validate the configuration.
+While it is not actively enforced, **no side effects** should occur from running
+the `Prepare` method. Specifically, don't create files, don't launch virtual
+machines, etc. Prepare's purpose is solely to configure the builder and validate
+the configuration.

-The `Prepare` method is called very early in the build process so that
-errors may be displayed to the user before anything actually happens.
+The `Prepare` method is called very early in the build process so that errors
+may be displayed to the user before anything actually happens.

### The "Provision" Method

-The `Provision` method is called when a machine is running and ready
-to be provisioned. The provisioner should do its real work here.
+The `Provision` method is called when a machine is running and ready to be
+provisioned. The provisioner should do its real work here.

-The method takes two parameters: a `packer.Ui` and a `packer.Communicator`.
-The UI can be used to communicate with the user what is going on. The
-communicator is used to communicate with the running machine, and is
-guaranteed to be connected at this point.
+The method takes two parameters: a `packer.Ui` and a `packer.Communicator`. The
+UI can be used to communicate with the user what is going on. The communicator
+is used to communicate with the running machine, and is guaranteed to be
+connected at this point.

The provision method should not return until provisioning is complete.

## Using the Communicator

-The `packer.Communicator` parameter and interface is used to communicate
-with running machine. The machine may be local (in a virtual machine or
-container of some sort) or it may be remote (in a cloud). The communicator
-interface abstracts this away so that communication is the same overall.
+The `packer.Communicator` parameter and interface is used to communicate with
+the running machine. The machine may be local (in a virtual machine or container
+of some sort) or it may be remote (in a cloud). The communicator interface
+abstracts this away so that communication is the same overall.

-The documentation around the [code itself](https://github.com/mitchellh/packer/blob/master/packer/communicator.go)
-is really great as an overview of how to use the interface. You should begin
-by reading this. Once you have read it, you can see some example usage below:
+The documentation around the [code
+itself](https://github.com/mitchellh/packer/blob/master/packer/communicator.go)
+is really great as an overview of how to use the interface. You should begin by
+reading this. Once you have read it, you can see some example usage below:

-```go
+``` {.go}
// Build the remote command.
var cmd packer.RemoteCmd
cmd.Command = "echo foo"
diff --git a/website/source/docs/index.html.markdown b/website/source/docs/index.html.markdown
index 5894d17db..cf924d688 100644
--- a/website/source/docs/index.html.markdown
+++ b/website/source/docs/index.html.markdown
@@ -1,13 +1,16 @@
---
-layout: "docs"
-page_title: "Packer Documentation"
-description: |-
-    Welcome to the Packer documentation! This documentation is more of a reference guide for all available features and options in Packer.
If you're just getting started with Packer, please start with the introduction and getting started guide instead. ---- +description: | + Welcome to the Packer documentation! This documentation is more of a reference + guide for all available features and options in Packer. If you're just getting + started with Packer, please start with the introduction and getting started + guide instead. +layout: docs +page_title: Packer Documentation +... # Packer Documentation Welcome to the Packer documentation! This documentation is more of a reference guide for all available features and options in Packer. If you're just getting -started with Packer, please start with the -[introduction and getting started guide](/intro) instead. +started with Packer, please start with the [introduction and getting started +guide](/intro) instead. diff --git a/website/source/docs/installation.html.markdown b/website/source/docs/installation.html.markdown index b24078729..35af3ed93 100644 --- a/website/source/docs/installation.html.markdown +++ b/website/source/docs/installation.html.markdown @@ -1,44 +1,48 @@ --- -layout: "docs" -page_title: "Install Packer" -description: |- - Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a binary package for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the README and is only recommended for advanced users. ---- +description: | + Packer must first be installed on the machine you want to run it on. To make + installation easy, Packer is distributed as a binary package for all supported + platforms and architectures. This page will not cover how to compile Packer from + source, as that is covered in the README and is only recommended for advanced + users. +layout: docs +page_title: Install Packer +... # Install Packer -Packer must first be installed on the machine you want to run it on. -To make installation easy, Packer is distributed as a [binary package](/downloads.html) -for all supported platforms and architectures. This page will not cover how -to compile Packer from source, as that is covered in the +Packer must first be installed on the machine you want to run it on. To make +installation easy, Packer is distributed as a [binary package](/downloads.html) +for all supported platforms and architectures. This page will not cover how to +compile Packer from source, as that is covered in the [README](https://github.com/mitchellh/packer/blob/master/README.md) and is only recommended for advanced users. ## Installing Packer -To install packer, first find the [appropriate package](/downloads.html) -for your system and download it. Packer is packaged as a "zip" file. +To install packer, first find the [appropriate package](/downloads.html) for +your system and download it. Packer is packaged as a "zip" file. Next, unzip the downloaded package into a directory where Packer will be installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good, -depending on whether you want to restrict the install to just your user -or install it system-wide. On Windows systems, you can put it wherever you'd -like. +depending on whether you want to restrict the install to just your user or +install it system-wide. On Windows systems, you can put it wherever you'd like. After unzipping the package, the directory should contain a set of binary -programs, such as `packer`, `packer-build-amazon-ebs`, etc. 
The final step
-to installation is to make sure the directory you installed Packer to
-is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux)
-for instructions on setting the PATH on Linux and Mac.
-[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows)
+programs, such as `packer`, `packer-builder-amazon-ebs`, etc. The final step to
+installation is to make sure the directory you installed Packer to is on the
+PATH. See [this
+page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux)
+for instructions on setting the PATH on Linux and Mac. [This
+page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows)
contains instructions for setting the PATH on Windows.

## Verifying the Installation

-After installing Packer, verify the installation worked by opening
-a new command prompt or console, and checking that `packer` is available:
+After installing Packer, verify the installation worked by opening a new command
+prompt or console, and checking that `packer` is available:

-```text
+``` {.text}
$ packer
usage: packer [--version] [--help] <command> [<args>]

@@ -50,8 +54,8 @@ Available commands are:
```

If you get an error that `packer` could not be found, then your PATH
-environmental variable was not setup properly. Please go back and ensure
-that your PATH variable contains the directory which has Packer installed.
+environmental variable was not set up properly. Please go back and ensure that
+your PATH variable contains the directory which has Packer installed.

Otherwise, Packer is installed and you're ready to go!

@@ -59,24 +63,24 @@ Otherwise, Packer is installed and you're ready to go!

Installation from binary packages is currently the only officially supported
installation method. The binary packages are guaranteed to be the latest
-available version and match the proper checksums. However, in addition to
-the official binaries, there are other unofficial 3rd party methods of
-installation managed by the Packer community:
+available version and match the proper checksums. However, in addition to the
+official binaries, there are other unofficial 3rd party methods of installation
+managed by the Packer community:

### Homebrew

If you're using OS X and [Homebrew](http://brew.sh), you can install Packer:

-```text
+``` {.text}
$ brew install packer
```

### Chocolatey

-If you're using Windows and [Chocolatey](http://chocolatey.org), you can install Packer from
-Windows command line (cmd). Remember that this is updated by a 3rd party, so
-it may not be the latest available version.
+If you're using Windows and [Chocolatey](http://chocolatey.org), you can install
+Packer from Windows command line (cmd). Remember that this is updated by a 3rd
+party, so it may not be the latest available version.

-```text
+``` {.text}
$ choco install packer
```
diff --git a/website/source/docs/machine-readable/command-build.html.markdown b/website/source/docs/machine-readable/command-build.html.markdown
index 3320f43f1..7472b7bfc 100644
--- a/website/source/docs/machine-readable/command-build.html.markdown
+++ b/website/source/docs/machine-readable/command-build.html.markdown
@@ -1,163 +1,165 @@
---
-layout: "docs_machine_readable"
-page_title: "Command: build - Machine-Readable Reference"
-description: |-
-    These are the machine-readable types that exist as part of the output of `packer build`.
---- +description: | + These are the machine-readable types that exist as part of the output of + `packer build`. +layout: 'docs\_machine\_readable' +page_title: 'Command: build - Machine-Readable Reference' +... # Build Command Types -These are the machine-readable types that exist as part of the output -of `packer build`. +These are the machine-readable types that exist as part of the output of +`packer build`.

<dl>
<dt>artifact (>= 2)</dt>
<dd>
<p>
Information about an artifact of the targeted item. This is a
fairly complex (but uniform!) machine-readable type that contains
subtypes. The subtypes are documented within this page in the
syntax of "artifact subtype: SUBTYPE". The number of arguments within
that subtype is in addition to the artifact args.
</p>

<p>
<strong>Data 1: index</strong> - The zero-based index of the
artifact being described. This goes up to "artifact-count" (see
below).
</p>

<p>
<strong>Data 2: subtype</strong> - The subtype that describes
the remaining arguments. See the documentation for the
subtype docs throughout this page.
</p>

<p>
<strong>Data 3..n: subtype data</strong> - Zero or more additional
data points related to the subtype. The exact count and meaning
of these subtypes comes from the subtype documentation.
</p>
</dd>

<dt>artifact-count (1)</dt>
<dd>
<p>
The number of artifacts associated with the given target. This
will always be outputted _before_ any other artifact information,
so you're able to know how many upcoming artifacts to look for.
</p>

<p>
<strong>Data 1: count</strong> - The number of artifacts as
a base 10 integer.
</p>
</dd>

<dt>artifact subtype: builder-id (1)</dt>
<dd>
<p>
The unique ID of the builder that created this artifact.
</p>

<p>
<strong>Data 1: id</strong> - The unique ID of the builder.
</p>
</dd>

<dt>artifact subtype: end (0)</dt>
<dd>
<p>
The last machine-readable output line outputted for an artifact.
This is a sentinel value so you know that no more data related to
the targeted artifact will be outputted.
</p>
</dd>

<dt>artifact subtype: file (2)</dt>
<dd>
<p>
A single file associated with the artifact. There are 0 to
"files-count" of these entries to describe every file that is
part of the artifact.
</p>

<p>
<strong>Data 1: index</strong> - Zero-based index of the file.
This goes from 0 to "files-count" minus one.
</p>

<p>
<strong>Data 2: filename</strong> - The filename.
</p>
</dd>

<dt>artifact subtype: files-count (1)</dt>
<dd>
<p>
The number of files associated with this artifact. Not all
artifacts have files associated with it.
</p>

<p>
<strong>Data 1: count</strong> - The number of files.
</p>
</dd>

<dt>artifact subtype: id (1)</dt>
<dd>
<p>
The ID (if any) of the artifact that was built. Not all artifacts
have associated IDs. For example, AMIs built have IDs associated
with them, but VirtualBox images do not. The exact format of the ID
is specific to the builder.
</p>

<p>
<strong>Data 1: id</strong> - The ID of the artifact.
</p>
</dd>

<dt>artifact subtype: nil (0)</dt>
<dd>
<p>
If present, this means that the artifact was nil, or that the targeted
build completed successfully but no artifact was created.
</p>
</dd>

<dt>artifact subtype: string (1)</dt>
<dd>
<p>
The human-readable string description of the artifact provided by
the artifact itself.
</p>

<p>
<strong>Data 1: string</strong> - The string output for the artifact.
</p>
</dd>

<dt>error-count (1)</dt>
<dd>
<p>
The number of errors that occurred during the build. This will
always be outputted before any errors so you know how many are coming.
</p>

<p>
<strong>Data 1: count</strong> - The number of build errors as
a base 10 integer.
</p>
</dd>

<dt>error (1)</dt>
<dd>
<p>
A build error that occurred. The target of this output will be
the build that had the error.
</p>

<p>
<strong>Data 1: error</strong> - The error message as a string.
</p>
</dd>
</dl>
diff --git a/website/source/docs/machine-readable/command-inspect.html.markdown b/website/source/docs/machine-readable/command-inspect.html.markdown
index 3f8bbb852..4a5d68876 100644
--- a/website/source/docs/machine-readable/command-inspect.html.markdown
+++ b/website/source/docs/machine-readable/command-inspect.html.markdown
@@ -1,63 +1,65 @@
---
-layout: "docs_machine_readable"
-page_title: "Command: inspect - Machine-Readable Reference"
-description: |-
-    These are the machine-readable types that exist as part of the output of `packer inspect`.
----
+description: |
+  These are the machine-readable types that exist as part of the output of
+  `packer inspect`.
+layout: 'docs\_machine\_readable'
+page_title: 'Command: inspect - Machine-Readable Reference'
+...

# Inspect Command Types

-These are the machine-readable types that exist as part of the output
-of `packer inspect`.
+These are the machine-readable types that exist as part of the output of
+`packer inspect`.
<dl>
<dt>template-variable (3)</dt>
<dd>
<p>
A user variable defined within the template.
</p>

<p>
<strong>Data 1: name</strong> - Name of the variable.
</p>

<p>
<strong>Data 2: default</strong> - The default value of the
variable.
</p>

<p>
<strong>Data 3: required</strong> - If non-zero, then this variable
is required.
</p>
</dd>

<dt>template-builder (2)</dt>
<dd>
<p>
A builder defined within the template.
</p>

<p>
<strong>Data 1: name</strong> - The name of the builder.
</p>

<p>
<strong>Data 2: type</strong> - The type of the builder. This will
generally be the same as the name unless you explicitly override
the name.
</p>
</dd>

<dt>template-provisioner (1)</dt>
<dd>
<p>
A provisioner defined within the template. Multiple of these may
exist. If so, they are outputted in the order they would run.
</p>

<p>
<strong>Data 1: name</strong> - The name/type of the provisioner.
</p>
</dd>
</dl>
diff --git a/website/source/docs/machine-readable/command-version.html.markdown b/website/source/docs/machine-readable/command-version.html.markdown
index a7029b627..8b32b2540 100644
--- a/website/source/docs/machine-readable/command-version.html.markdown
+++ b/website/source/docs/machine-readable/command-version.html.markdown
@@ -1,47 +1,49 @@
---
-layout: "docs_machine_readable"
-page_title: "Command: version - Machine-Readable Reference"
-description: |-
-    These are the machine-readable types that exist as part of the output of `packer version`.
----
+description: |
+  These are the machine-readable types that exist as part of the output of
+  `packer version`.
+layout: 'docs\_machine\_readable'
+page_title: 'Command: version - Machine-Readable Reference'
+...

# Version Command Types

-These are the machine-readable types that exist as part of the output
-of `packer version`.
+These are the machine-readable types that exist as part of the output of
+`packer version`.
<dl>
<dt>version (1)</dt>
<dd>
<p>
The version number of Packer running.
</p>

<p>
<strong>Data 1: version</strong> - The version of Packer running,
only including the major, minor, and patch versions. Example:
"0.2.4".
</p>
</dd>

<dt>version-commit (1)</dt>
<dd>
<p>
The SHA1 of the Git commit that built this version of Packer.
</p>

<p>
<strong>Data 1: commit SHA1</strong> - The SHA1 of the commit.
</p>
</dd>

<dt>version-prerelease (1)</dt>
<dd>
<p>
The prerelease tag (if any) for the running version of Packer. This
can be "beta", "dev", "alpha", etc. If this is empty, you can assume
it is a release version running.
</p>

<p>
<strong>Data 1: prerelease name</strong> - The name of the
prerelease tag.
</p>
</dd>
</dl>
diff --git a/website/source/docs/machine-readable/general.html.markdown b/website/source/docs/machine-readable/general.html.markdown
index 1f08be4d2..b29ae053f 100644
--- a/website/source/docs/machine-readable/general.html.markdown
+++ b/website/source/docs/machine-readable/general.html.markdown
@@ -1,9 +1,10 @@
---
-layout: "docs_machine_readable"
-page_title: "General Types - Machine-Readable Reference"
-description: |-
-    These are the machine-readable types that can appear in almost any machine-readable output and are provided by Packer core itself.
----
+description: |
+  These are the machine-readable types that can appear in almost any
+  machine-readable output and are provided by Packer core itself.
+layout: 'docs\_machine\_readable'
+page_title: 'General Types - Machine-Readable Reference'
+...

# General Types

@@ -11,21 +12,22 @@ These are the machine-readable types that can appear in almost any
machine-readable output and are provided by Packer core itself.
<dl>
<dt>ui (2)</dt>
<dd>
<p>
Specifies the output and type of output that would've normally
gone to the console if Packer were running in human-readable
mode.
</p>

<p>
<strong>Data 1: type</strong> - The type of UI message that would've
been outputted. Can be "say", "message", or "error".
</p>

<p>
<strong>Data 2: output</strong> - The UI message that would have
been outputted.
</p>
</dd>
</dl>
diff --git a/website/source/docs/machine-readable/index.html.markdown b/website/source/docs/machine-readable/index.html.markdown
index d26106b15..161bda001 100644
--- a/website/source/docs/machine-readable/index.html.markdown
+++ b/website/source/docs/machine-readable/index.html.markdown
@@ -1,32 +1,35 @@
---
-layout: "docs_machine_readable"
-page_title: "Machine-Readable Reference"
-description: |-
-    This is the reference for the various message categories for Packer machine-readable output. Please read that page if you're unfamiliar with the general format and usage for the machine-readable output.
----
+description: |
+  This is the reference for the various message categories for Packer
+  machine-readable output. Please read that page if you're unfamiliar with the
+  general format and usage for the machine-readable output.
+layout: 'docs\_machine\_readable'
+page_title: 'Machine-Readable Reference'
+...

# Machine-Readable Reference

This is the reference for the various message categories for Packer
-[machine-readable output](/docs/command-line/machine-readable.html).
-Please read that page if you're unfamiliar with the general format and
-usage for the machine-readable output.
+[machine-readable output](/docs/command-line/machine-readable.html). Please read
+that page if you're unfamiliar with the general format and usage for the
+machine-readable output.

-The layout of this reference is split into where the types come from.
-There are a set of core types that are from Packer core itself. Then
-there are types that come from various components of Packer such as the
-builders, provisioners, and more.
+The layout of this reference is split into where the types come from. There are
+a set of core types that are from Packer core itself. Then there are types that
+come from various components of Packer such as the builders, provisioners, and
+more.

Within each section, the format of the documentation is the following:
<dl>
<dt>type-name (data-count)</dt>
<dd>
<p>
Description of the type.
</p>

<p>
<strong>Data 1: name</strong> - Description.
</p>
</dd>
</dl>
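Since every machine-readable line shares the comma-separated shape described
on the machine-readable output page (timestamp, target, type, then the data
points documented here), a consumer can parse all of these types generically.
A minimal sketch that reads machine-readable output from stdin; the
`%!(PACKER_COMMA)` unescaping assumes the comma escaping described on that
page:

``` {.go}
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		parts := strings.Split(scanner.Text(), ",")
		if len(parts) < 3 {
			continue // not a machine-readable line
		}
		timestamp, target, msgType := parts[0], parts[1], parts[2]
		data := parts[3:]
		for i, d := range data {
			// Literal commas inside data points arrive escaped.
			data[i] = strings.Replace(d, "%!(PACKER_COMMA)", ",", -1)
		}
		fmt.Printf("%s [%s] %s: %v\n", timestamp, target, msgType, data)
	}
}
```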
    diff --git a/website/source/docs/other/core-configuration.html.markdown b/website/source/docs/other/core-configuration.html.markdown index 3727af061..db1f75ab7 100644 --- a/website/source/docs/other/core-configuration.html.markdown +++ b/website/source/docs/other/core-configuration.html.markdown @@ -1,25 +1,29 @@ --- -layout: "docs" -page_title: "Core Configuration" -description: |- - There are a few configuration settings that affect Packer globally by configuring the core of Packer. These settings all have reasonable defaults, so you generally don't have to worry about it until you want to tweak a configuration. If you're just getting started with Packer, don't worry about core configuration for now. ---- +description: | + There are a few configuration settings that affect Packer globally by + configuring the core of Packer. These settings all have reasonable defaults, so + you generally don't have to worry about it until you want to tweak a + configuration. If you're just getting started with Packer, don't worry about + core configuration for now. +layout: docs +page_title: Core Configuration +... # Core Configuration There are a few configuration settings that affect Packer globally by configuring the core of Packer. These settings all have reasonable defaults, so -you generally don't have to worry about it until you want to tweak -a configuration. If you're just getting started with Packer, don't worry -about core configuration for now. +you generally don't have to worry about it until you want to tweak a +configuration. If you're just getting started with Packer, don't worry about +core configuration for now. -The default location where Packer looks for this file depends on the -platform. For all non-Windows platforms, Packer looks for `$HOME/.packerconfig`. -For Windows, Packer looks for `%APPDATA%/packer.config`. If the file -doesn't exist, then Packer ignores it and just uses the default configuration. +The default location where Packer looks for this file depends on the platform. +For all non-Windows platforms, Packer looks for `$HOME/.packerconfig`. For +Windows, Packer looks for `%APPDATA%/packer.config`. If the file doesn't exist, +then Packer ignores it and just uses the default configuration. -The location of the core configuration file can be modified by setting -the `PACKER_CONFIG` environmental variable to be the path to another file. +The location of the core configuration file can be modified by setting the +`PACKER_CONFIG` environmental variable to be the path to another file. The format of the configuration file is basic JSON. @@ -28,12 +32,13 @@ The format of the configuration file is basic JSON. Below is the list of all available configuration parameters for the core configuration file. None of these are required, since all have sane defaults. -* `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and - maximum ports that Packer uses for communication with plugins, since - plugin communication happens over TCP connections on your local host. - By default these are 10,000 and 25,000, respectively. Be sure to set a fairly - wide range here, since Packer can easily use over 25 ports on a single run. +- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and + maximum ports that Packer uses for communication with plugins, since plugin + communication happens over TCP connections on your local host. By default + these are 10,000 and 25,000, respectively. 
Be sure to set a fairly wide range + here, since Packer can easily use over 25 ports on a single run. -* `builders`, `commands`, `post-processors`, and `provisioners` are objects that are used to - install plugins. The details of how exactly these are set is covered - in more detail in the [installing plugins documentation page](/docs/extend/plugins.html). +- `builders`, `commands`, `post-processors`, and `provisioners` are objects that + are used to install plugins. The details of how exactly these are set is + covered in more detail in the [installing plugins documentation + page](/docs/extend/plugins.html). diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index eabf56533..8c8012bc8 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -1,9 +1,12 @@ --- -layout: "docs" -page_title: "Debugging Packer" -description: |- - Packer strives to be stable and bug-free, but issues inevitably arise where certain things may not work entirely correctly, or may not appear to work correctly. In these cases, it is sometimes helpful to see more details about what Packer is actually doing. ---- +description: | + Packer strives to be stable and bug-free, but issues inevitably arise where + certain things may not work entirely correctly, or may not appear to work + correctly. In these cases, it is sometimes helpful to see more details about + what Packer is actually doing. +layout: docs +page_title: Debugging Packer +... # Debugging Packer Builds @@ -17,39 +20,40 @@ usually will stop between each step, waiting for keyboard input before continuing. This will allow you to inspect state and so on. In debug mode once the remote instance is instantiated, Packer will emit to the -current directory an emphemeral private ssh key as a .pem file. Using that you +current directory an ephemeral private ssh key as a .pem file. Using that you can `ssh -i ` into the remote build instance and see what is going on -for debugging. The emphemeral key will be deleted at the end of the packer run +for debugging. The ephemeral key will be deleted at the end of the packer run during cleanup. ### Windows + As of Packer 0.8.1 the default WinRM communicator will emit the password for a Remote Desktop Connection into your instance. This happens following the several minute pause as the instance is booted. Note a .pem key is still created for securely transmitting the password. Packer automatically decrypts the password for you in debug mode. -## Debugging Packer +## Debugging Packer Issues occasionally arise where certain things may not work entirely correctly, or may not appear to work correctly. In these cases, it is sometimes helpful to see more details about what Packer is actually doing. Packer has detailed logs which can be enabled by setting the `PACKER_LOG` -environmental variable to any value like this `PACKER_LOG=1 packer build -`. This will cause detailed logs to appear on stderr. The logs -contain log messages from Packer as well as any plugins that are being used. Log -messages from plugins are prefixed by their application name. +environmental variable to any value like this +`PACKER_LOG=1 packer build `. This will cause detailed logs to +appear on stderr. The logs contain log messages from Packer as well as any +plugins that are being used. Log messages from plugins are prefixed by their +application name.
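As a point of reference, the core configuration file described earlier is plain JSON, so a minimal `$HOME/.packerconfig` that only pins the plugin port range might look like the sketch below (the values shown are just the documented defaults):

``` {.javascript}
{
  "plugin_min_port": 10000,
  "plugin_max_port": 25000
}
```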
-Note that because Packer is highly parallelized, log messages sometimes -appear out of order, especially with respect to plugins. In this case, -it is important to pay attention to the timestamp of the log messages -to determine order. +Note that because Packer is highly parallelized, log messages sometimes appear +out of order, especially with respect to plugins. In this case, it is important +to pay attention to the timestamp of the log messages to determine order. In addition to simply enabling the log, you can set `PACKER_LOG_PATH` in order -to force the log to always go to a specific file when logging is enabled. -Note that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in -order for any logging to be enabled. +to force the log to always go to a specific file when logging is enabled. Note +that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in order for +any logging to be enabled. -If you find a bug with Packer, please include the detailed log by using -a service such as [gist](http://gist.github.com). +If you find a bug with Packer, please include the detailed log by using a +service such as [gist](http://gist.github.com). diff --git a/website/source/docs/other/environmental-variables.html.markdown b/website/source/docs/other/environmental-variables.html.markdown index 318e25e25..7d455c708 100644 --- a/website/source/docs/other/environmental-variables.html.markdown +++ b/website/source/docs/other/environmental-variables.html.markdown @@ -1,34 +1,36 @@ --- -layout: "docs" -page_title: "Environmental Variables for Packer" -description: |- - Packer uses a variety of environmental variables. ---- +description: 'Packer uses a variety of environmental variables.' +layout: docs +page_title: Environmental Variables for Packer +... # Environmental Variables for Packer -Packer uses a variety of environmental variables. A listing and description of each can be found below: +Packer uses a variety of environmental variables. A listing and description of +each can be found below: -* `PACKER_CACHE_DIR` - The location of the packer cache. +- `PACKER_CACHE_DIR` - The location of the packer cache. -* `PACKER_CONFIG` - The location of the core configuration file. The format - of the configuration file is basic JSON. - See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_CONFIG` - The location of the core configuration file. The format of + the configuration file is basic JSON. See the [core configuration + page](/docs/other/core-configuration.html). -* `PACKER_LOG` - Setting this to any value will enable the logger. - See the [debugging page](/docs/other/debugging.html). +- `PACKER_LOG` - Setting this to any value will enable the logger. See the + [debugging page](/docs/other/debugging.html). -* `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must - be set for any logging to occur. See the [debugging page](/docs/other/debugging.html). +- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be + set for any logging to occur. See the [debugging + page](/docs/other/debugging.html). -* `PACKER_NO_COLOR` - Setting this to any value will disable color in the terminal. +- `PACKER_NO_COLOR` - Setting this to any value will disable color in + the terminal. -* `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for - communication with plugins, since plugin communication happens over - TCP connections on your local host. The default is 25,000. 
- See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for communication + with plugins, since plugin communication happens over TCP connections on your + local host. The default is 25,000. See the [core configuration + page](/docs/other/core-configuration.html). -* `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for - communication with plugins, since plugin communication happens - over TCP connections on your local host. The default is 10,000. - See the [core configuration page](/docs/other/core-configuration.html). +- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for communication + with plugins, since plugin communication happens over TCP connections on your + local host. The default is 10,000. See the [core configuration + page](/docs/other/core-configuration.html). diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 91b78e766..c038a119a 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -1,27 +1,38 @@ --- -layout: "docs" -page_title: "Atlas Post-Processor" -description: |- - The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version and distribute them in a simple way. ---- +description: | + The Atlas post-processor for Packer receives an artifact from a Packer build and + uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version + and distribute them in a simple way. +layout: docs +page_title: 'Atlas Post-Processor' +... # Atlas Post-Processor Type: `atlas` -The Atlas post-processor for Packer receives an artifact from a Packer build and uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves artifacts, allowing you to version and distribute them in a simple way. +The Atlas post-processor for Packer receives an artifact from a Packer build and +uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves +artifacts, allowing you to version and distribute them in a simple way. ## Workflow To take full advantage of Packer and Atlas, it's important to understand the -workflow for creating artifacts with Packer and storing them in Atlas using this post-processor. The goal of the Atlas post-processor is to streamline the distribution of public or private artifacts by hosting them in a central location in Atlas. +workflow for creating artifacts with Packer and storing them in Atlas using this +post-processor. The goal of the Atlas post-processor is to streamline the +distribution of public or private artifacts by hosting them in a central +location in Atlas. Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) -2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the version if the artifact already exists -3. The new version is ready and available to be used in deployments with a tool like [Terraform](https://terraform.io) - +1. Packer builds an AMI with the [Amazon AMI + builder](/docs/builders/amazon.html) +2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. 
+ The `atlas` post-processor is configured with the name of the AMI, for example + `hashicorp/foobar`, to create the artifact in Atlas or update the version if + the artifact already exists +3. The new version is ready and available to be used in deployments with a tool + like [Terraform](https://terraform.io) ## Configuration @@ -29,32 +40,36 @@ The configuration allows you to specify and access the artifact in Atlas. ### Required: -* `token` (string) - Your access token for the Atlas API. - This can be generated on your [tokens page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can export your Atlas token as an environmental variable and remove it from the configuration. +- `token` (string) - Your access token for the Atlas API. This can be generated + on your [tokens page](https://atlas.hashicorp.com/settings/tokens). + Alternatively you can export your Atlas token as an environmental variable and + remove it from the configuration. -* `artifact` (string) - The shorthand tag for your artifact that maps to - Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must - have access to the organization, hashicorp in this example, in order to add an artifact to - the organization in Atlas. +- `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, + i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must + have access to the organization, hashicorp in this example, in order to add an + artifact to the organization in Atlas. -* `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. - This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. +- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will + always be `amazon.ami`. This field must be defined because Atlas can host + other artifact types, such as Vagrant boxes. --> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas post-processor](/docs/post-processors/atlas.html). +-> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas +post-processor](/docs/post-processors/atlas.html). ### Optional: -* `atlas_url` (string) - Override the base URL for Atlas. This -is useful if you're using Atlas Enterprise in your own network. Defaults -to `https://atlas.hashicorp.com/api/v1`. +- `atlas_url` (string) - Override the base URL for Atlas. This is useful if + you're using Atlas Enterprise in your own network. Defaults to + `https://atlas.hashicorp.com/api/v1`. -* `metadata` (map) - Send metadata about the artifact. If the artifact - type is "vagrant.box", you must specify a "provider" metadata about - what provider to use. +- `metadata` (map) - Send metadata about the artifact. If the artifact type is + "vagrant.box", you must specify "provider" metadata indicating which provider + to use. ### Example Configuration -```javascript +``` {.javascript} { "variables": { "aws_access_key": "ACCESS_KEY_HERE", diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index e6a1237e9..716e4e866 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -1,9 +1,10 @@ --- -layout: "docs" -page_title: "compress Post-Processor" -description: |- - The Packer compress post-processor takes an artifact with files (such as from VMware or VirtualBox) and compresses the artifact into a single archive.
---- +description: | + The Packer compress post-processor takes an artifact with files (such as from + VMware or VirtualBox) and compresses the artifact into a single archive. +layout: docs +page_title: 'compress Post-Processor' +... # Compress Post-Processor @@ -16,49 +17,55 @@ VMware or VirtualBox) and compresses the artifact into a single archive. ### Required: -You must specify the output filename. The archive format is derived from the filename. +You must specify the output filename. The archive format is derived from the +filename. -* `output` (string) - The path to save the compressed archive. The archive - format is inferred from the filename. E.g. `.tar.gz` will be a gzipped - tarball. `.zip` will be a zip file. If the extension can't be detected packer - defaults to `.tar.gz` behavior but will not change the filename. +- `output` (string) - The path to save the compressed archive. The archive + format is inferred from the filename. E.g. `.tar.gz` will be a + gzipped tarball. `.zip` will be a zip file. If the extension can't be detected + packer defaults to `.tar.gz` behavior but will not change the filename. If you are executing multiple builders in parallel you should make sure - `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. + `output` is unique for each one. For example + `packer_{{.BuildName}}_{{.Provider}}.zip`. ### Optional: -If you want more control over how the archive is created you can specify the following settings: +If you want more control over how the archive is created you can specify the +following settings: -* `compression_level` (integer) - Specify the compression level, for algorithms +- `compression_level` (integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Defaults to `6` -* `keep_input_artifact` (boolean) - Keep source files; defaults to `false` +- `keep_input_artifact` (boolean) - Keep source files; defaults to `false` ### Supported Formats -Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to compress. +Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and +`.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to +compress. ## Examples -Some minimal examples are shown below, showing only the post-processor configuration: +Some minimal examples are shown below, showing only the post-processor +configuration: -```json +``` {.json} { "type": "compress", "output": "archive.tar.lz4" } ``` -```json +``` {.json} { "type": "compress", "output": "archive.zip" } ``` -```json +``` {.json} { "type": "compress", "output": "archive.gz", diff --git a/website/source/docs/post-processors/docker-import.html.markdown b/website/source/docs/post-processors/docker-import.html.markdown index c2d7bba80..0c3855622 100644 --- a/website/source/docs/post-processors/docker-import.html.markdown +++ b/website/source/docs/post-processors/docker-import.html.markdown @@ -1,36 +1,38 @@ --- -layout: "docs" -page_title: "docker-import Post-Processor" -description: |- - The Packer Docker import post-processor takes an artifact from the docker builder and imports it with Docker locally. This allows you to apply a repository and tag to the image and lets you use the other Docker post-processors such as docker-push to push the image to a registry. 
---- +description: | + The Packer Docker import post-processor takes an artifact from the docker + builder and imports it with Docker locally. This allows you to apply a + repository and tag to the image and lets you use the other Docker + post-processors such as docker-push to push the image to a registry. +layout: docs +page_title: 'docker-import Post-Processor' +... # Docker Import Post-Processor Type: `docker-import` -The Packer Docker import post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) and imports it with Docker -locally. This allows you to apply a repository and tag to the image -and lets you use the other Docker post-processors such as -[docker-push](/docs/post-processors/docker-push.html) to push the image -to a registry. +The Packer Docker import post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) and imports it with Docker locally. This +allows you to apply a repository and tag to the image and lets you use the other +Docker post-processors such as +[docker-push](/docs/post-processors/docker-push.html) to push the image to a +registry. ## Configuration -The configuration for this post-processor is extremely simple. At least -a repository is required. +The configuration for this post-processor is extremely simple. At least a +repository is required. -* `repository` (string) - The repository of the imported image. +- `repository` (string) - The repository of the imported image. -* `tag` (string) - The tag for the imported image. By default this is not - set. +- `tag` (string) - The tag for the imported image. By default this is not set. ## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-import", "repository": "mitchellh/packer", @@ -38,9 +40,9 @@ An example is shown below, showing only the post-processor configuration: } ``` -This example would take the image created by the Docker builder -and import it into the local Docker process with a name of `mitchellh/packer:0.7`. +This example would take the image created by the Docker builder and import it +into the local Docker process with a name of `mitchellh/packer:0.7`. Following this, you can use the -[docker-push](/docs/post-processors/docker-push.html) -post-processor to push it to a registry, if you want. +[docker-push](/docs/post-processors/docker-push.html) post-processor to push it +to a registry, if you want. diff --git a/website/source/docs/post-processors/docker-push.html.markdown b/website/source/docs/post-processors/docker-push.html.markdown index 2f7ae3e92..72793b735 100644 --- a/website/source/docs/post-processors/docker-push.html.markdown +++ b/website/source/docs/post-processors/docker-push.html.markdown @@ -1,38 +1,38 @@ --- -layout: "docs" -page_title: "Docker Push Post-Processor" -description: |- - The Packer Docker push post-processor takes an artifact from the docker-import post-processor and pushes it to a Docker registry. ---- +description: | + The Packer Docker push post-processor takes an artifact from the docker-import + post-processor and pushes it to a Docker registry. +layout: docs +page_title: 'Docker Push Post-Processor' +... # Docker Push Post-Processor Type: `docker-push` The Packer Docker push post-processor takes an artifact from the -[docker-import](/docs/post-processors/docker-import.html) post-processor -and pushes it to a Docker registry. 
+[docker-import](/docs/post-processors/docker-import.html) post-processor and +pushes it to a Docker registry. ## Configuration This post-processor has only optional configuration: -* `login` (boolean) - Defaults to false. If true, the post-processor will - login prior to pushing. +- `login` (boolean) - Defaults to false. If true, the post-processor will log in + prior to pushing. -* `login_email` (string) - The email to use to authenticate to login. +- `login_email` (string) - The email to use when logging in. -* `login_username` (string) - The username to use to authenticate to login. +- `login_username` (string) - The username to use when logging in. -* `login_password` (string) - The password to use to authenticate to login. +- `login_password` (string) - The password to use when logging in. -* `login_server` (string) - The server address to login to. +- `login_server` (string) - The server address to log in to. --> **Note:** If you login using the credentials above, the -post-processor will automatically log you out afterwards (just the server -specified). +-> **Note:** If you log in using the credentials above, the post-processor +will automatically log you out afterwards (just the server specified). ## Example -For an example of using docker-push, see the section on using -generated artifacts from the [docker builder](/docs/builders/docker.html). +For an example of using docker-push, see the section on using generated +artifacts from the [docker builder](/docs/builders/docker.html). diff --git a/website/source/docs/post-processors/docker-save.html.markdown b/website/source/docs/post-processors/docker-save.html.markdown index ca03dfcf6..8f758755c 100644 --- a/website/source/docs/post-processors/docker-save.html.markdown +++ b/website/source/docs/post-processors/docker-save.html.markdown @@ -1,35 +1,37 @@ --- -layout: "docs" -page_title: "docker-save Post-Processor" -description: |- - The Packer Docker Save post-processor takes an artifact from the docker builder that was committed and saves it to a file. This is similar to exporting the Docker image directly from the builder, except that it preserves the hierarchy of images and metadata. ---- +description: | + The Packer Docker Save post-processor takes an artifact from the docker builder + that was committed and saves it to a file. This is similar to exporting the + Docker image directly from the builder, except that it preserves the hierarchy + of images and metadata. +layout: docs +page_title: 'docker-save Post-Processor' +... # Docker Save Post-Processor Type: `docker-save` -The Packer Docker Save post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) that was committed -and saves it to a file. This is similar to exporting the Docker image -directly from the builder, except that it preserves the hierarchy of -images and metadata. +The Packer Docker Save post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) that was committed and saves it to a file. +This is similar to exporting the Docker image directly from the builder, except +that it preserves the hierarchy of images and metadata. -We understand the terminology can be a bit confusing, but we've -adopted the terminology from Docker, so if you're familiar with that, then -you'll be familiar with this and vice versa.
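For the docker-push options above, a minimal sketch that logs in with explicit credentials might look like the following (every value is a placeholder):

``` {.javascript}
{
  "type": "docker-push",
  "login": true,
  "login_username": "USERNAME_HERE",
  "login_password": "PASSWORD_HERE"
}
```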
+We understand the terminology can be a bit confusing, but we've adopted the +terminology from Docker, so if you're familiar with that, then you'll be +familiar with this and vice versa. ## Configuration The configuration for this post-processor is extremely simple. -* `path` (string) - The path to save the image. +- `path` (string) - The path to save the image. ## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-save", "path": "foo.tar" diff --git a/website/source/docs/post-processors/docker-tag.html.markdown b/website/source/docs/post-processors/docker-tag.html.markdown index d3925d1fa..42c480676 100644 --- a/website/source/docs/post-processors/docker-tag.html.markdown +++ b/website/source/docs/post-processors/docker-tag.html.markdown @@ -1,43 +1,44 @@ --- -layout: "docs" -page_title: "docker-tag Post-Processor" -description: |- - The Packer Docker Tag post-processor takes an artifact from the docker builder that was committed and tags it into a repository. This allows you to use the other Docker post-processors such as docker-push to push the image to a registry. ---- +description: | + The Packer Docker Tag post-processor takes an artifact from the docker builder + that was committed and tags it into a repository. This allows you to use the + other Docker post-processors such as docker-push to push the image to a + registry. +layout: docs +page_title: 'docker-tag Post-Processor' +... # Docker Tag Post-Processor Type: `docker-tag` -The Packer Docker Tag post-processor takes an artifact from the -[docker builder](/docs/builders/docker.html) that was committed -and tags it into a repository. This allows you to use the other -Docker post-processors such as -[docker-push](/docs/post-processors/docker-push.html) to push the image -to a registry. +The Packer Docker Tag post-processor takes an artifact from the [docker +builder](/docs/builders/docker.html) that was committed and tags it into a +repository. This allows you to use the other Docker post-processors such as +[docker-push](/docs/post-processors/docker-push.html) to push the image to a +registry. -This is very similar to the [docker-import](/docs/post-processors/docker-import.html) -post-processor except that this works with committed resources, rather -than exported. +This is very similar to the +[docker-import](/docs/post-processors/docker-import.html) post-processor except +that this works with committed resources, rather than exported. ## Configuration -The configuration for this post-processor is extremely simple. At least -a repository is required. +The configuration for this post-processor is extremely simple. At least a +repository is required. -* `repository` (string) - The repository of the image. +- `repository` (string) - The repository of the image. -* `tag` (string) - The tag for the image. By default this is not - set. +- `tag` (string) - The tag for the image. By default this is not set. -* `force` (boolean) - If true, this post-processor forcibly tag the image - even if tag name is collided. Default to `false`. +- `force` (boolean) - If true, this post-processor will forcibly tag the image even + if the tag name collides. Defaults to `false`.
## Example An example is shown below, showing only the post-processor configuration: -```javascript +``` {.javascript} { "type": "docker-tag", "repository": "mitchellh/packer", @@ -45,9 +46,9 @@ An example is shown below, showing only the post-processor configuration: } ``` -This example would take the image created by the Docker builder -and tag it into the local Docker process with a name of `mitchellh/packer:0.7`. +This example would take the image created by the Docker builder and tag it into +the local Docker process with a name of `mitchellh/packer:0.7`. Following this, you can use the -[docker-push](/docs/post-processors/docker-push.html) -post-processor to push it to a registry, if you want. +[docker-push](/docs/post-processors/docker-push.html) post-processor to push it +to a registry, if you want. diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 451ed087b..e049552da 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -1,81 +1,88 @@ --- -layout: "docs" -page_title: "Vagrant Cloud Post-Processor" -description: |- - The Packer Vagrant Cloud post-processor receives a Vagrant box from the `vagrant` post-processor and pushes it to Vagrant Cloud. Vagrant Cloud hosts and serves boxes to Vagrant, allowing you to version and distribute boxes to an organization in a simple way. ---- +description: | + The Packer Vagrant Cloud post-processor receives a Vagrant box from the + `vagrant` post-processor and pushes it to Vagrant Cloud. Vagrant Cloud hosts and + serves boxes to Vagrant, allowing you to version and distribute boxes to an + organization in a simple way. +layout: docs +page_title: 'Vagrant Cloud Post-Processor' +... # Vagrant Cloud Post-Processor -~> Vagrant Cloud has been superseded by Atlas. Please use the [Atlas post-processor](/docs/post-processors/atlas.html) instead. Learn more about [Atlas](https://atlas.hashicorp.com/). +~> Vagrant Cloud has been superseded by Atlas. Please use the [Atlas +post-processor](/docs/post-processors/atlas.html) instead. Learn more about +[Atlas](https://atlas.hashicorp.com/). Type: `vagrant-cloud` -The Packer Vagrant Cloud post-processor receives a Vagrant box from the `vagrant` -post-processor and pushes it to Vagrant Cloud. [Vagrant Cloud](https://vagrantcloud.com) -hosts and serves boxes to Vagrant, allowing you to version and distribute -boxes to an organization in a simple way. +The Packer Vagrant Cloud post-processor receives a Vagrant box from the +`vagrant` post-processor and pushes it to Vagrant Cloud. [Vagrant +Cloud](https://vagrantcloud.com) hosts and serves boxes to Vagrant, allowing you +to version and distribute boxes to an organization in a simple way. -You'll need to be familiar with Vagrant Cloud, have an upgraded account -to enable box hosting, and be distributing your box via the [shorthand name](http://docs.vagrantup.com/v2/cli/box.html) -configuration. +You'll need to be familiar with Vagrant Cloud, have an upgraded account to +enable box hosting, and be distributing your box via the [shorthand +name](http://docs.vagrantup.com/v2/cli/box.html) configuration. ## Workflow It's important to understand the workflow that using this post-processor enforces in order to take full advantage of Vagrant and Vagrant Cloud. -The use of this processor assume that you currently distribute, or plan -to distribute, boxes via Vagrant Cloud.
It also assumes you create Vagrant -Boxes and deliver them to your team in some fashion. +The use of this processor assumes that you currently distribute, or plan to +distribute, boxes via Vagrant Cloud. It also assumes you create Vagrant Boxes +and deliver them to your team in some fashion. Here is an example workflow: -1. You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box `hashicorp/foobar` on Vagrant Cloud -via the `box_tag` configuration -2. The post-processor receives the box from the `vagrant` post-processor -3. It then creates the configured version, or verifies the existence of it, on Vagrant Cloud -4. A provider matching the name of the Vagrant provider is then created -5. The box is uploaded to Vagrant Cloud -6. The upload is verified -7. The version is released and available to users of the box - +1. You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box + `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on + Vagrant Cloud +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. The version is released and available to users of the box ## Configuration -The configuration allows you to specify the target box that you have -access to on Vagrant Cloud, as well as authentication and version information. +The configuration allows you to specify the target box that you have access to +on Vagrant Cloud, as well as authentication and version information. ### Required: -* `access_token` (string) - Your access token for the Vagrant Cloud API. - This can be generated on your [tokens page](https://vagrantcloud.com/account/tokens). +- `access_token` (string) - Your access token for the Vagrant Cloud API. This + can be generated on your [tokens + page](https://vagrantcloud.com/account/tokens). -* `box_tag` (string) - The shorthand tag for your box that maps to - Vagrant Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` - -* `version` (string) - The version number, typically incrementing a previous version. - The version string is validated based on [Semantic Versioning](http://semver.org/). The string must match - a pattern that could be semver, and doesn't validate that the version comes after - your previous versions. +- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant + Cloud, i.e. `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` +- `version` (string) - The version number, typically incrementing a + previous version. The version string is validated based on [Semantic + Versioning](http://semver.org/). The string must match a pattern that could be + semver; Packer doesn't validate that the version comes after your + previous versions. ### Optional: -* `no_release` (string) - If set to true, does not release the version -on Vagrant Cloud, making it active. You can manually release the version -via the API or Web UI. Defaults to false. +- `no_release` (string) - If set to true, does not release the version on + Vagrant Cloud, leaving it inactive. You can manually release the version via the + API or Web UI. Defaults to false. -* `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud.
This -is useful if you're using Vagrant Private Cloud in your own network. Defaults -to `https://vagrantcloud.com/api/v1` +- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This + is useful if you're using Vagrant Private Cloud in your own network. Defaults + to `https://vagrantcloud.com/api/v1` -* `version_description` (string) - Optionally markdown text used as a full-length - and in-depth description of the version, typically for denoting changes introduced +- `version_description` (string) - Optional markdown text used as a + full-length and in-depth description of the version, typically for denoting + changes introduced -* `box_download_url` (string) - Optional URL for a self-hosted box. If this is set -the box will not be uploaded to the Vagrant Cloud. +- `box_download_url` (string) - Optional URL for a self-hosted box. If this is + set, the box will not be uploaded to the Vagrant Cloud. ## Use with Vagrant Post-Processor @@ -84,7 +91,7 @@ An example configuration is below. Note the use of a doubly-nested array, which ensures that the Vagrant Cloud post-processor is run after the Vagrant post-processor. -```javascript +``` {.javascript} { "variables": { "version": "", diff --git a/website/source/docs/post-processors/vagrant.html.markdown b/website/source/docs/post-processors/vagrant.html.markdown index 7ed19d665..da1b8daa9 100644 --- a/website/source/docs/post-processors/vagrant.html.markdown +++ b/website/source/docs/post-processors/vagrant.html.markdown @@ -1,91 +1,90 @@ --- -layout: "docs" -page_title: "Vagrant Post-Processor" -description: |- - The Packer Vagrant post-processor takes a build and converts the artifact into a valid Vagrant box, if it can. This lets you use Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact how the official boxes distributed by Vagrant are created. ---- +description: | + The Packer Vagrant post-processor takes a build and converts the artifact into a + valid Vagrant box, if it can. This lets you use Packer to automatically create + arbitrarily complex Vagrant boxes, and is in fact how the official boxes + distributed by Vagrant are created. +layout: docs +page_title: 'Vagrant Post-Processor' +... # Vagrant Post-Processor Type: `vagrant` -The Packer Vagrant post-processor takes a build and converts the artifact -into a valid [Vagrant](http://www.vagrantup.com) box, if it can. -This lets you use Packer to automatically create arbitrarily complex -Vagrant boxes, and is in fact how the official boxes distributed by -Vagrant are created. +The Packer Vagrant post-processor takes a build and converts the artifact into a +valid [Vagrant](http://www.vagrantup.com) box, if it can. This lets you use +Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact +how the official boxes distributed by Vagrant are created. -If you've never used a post-processor before, please read the -documentation on [using post-processors](/docs/templates/post-processors.html) -in templates. This knowledge will be expected for the remainder of -this document. +If you've never used a post-processor before, please read the documentation on +[using post-processors](/docs/templates/post-processors.html) in templates. This +knowledge will be expected for the remainder of this document.
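In its simplest form the Vagrant post-processor needs no options at all; a template fragment like this sketch is enough to turn a supported build into a box:

``` {.javascript}
{
  "post-processors": ["vagrant"]
}
```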
-Because Vagrant boxes are [provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), -the Vagrant post-processor is hardcoded to understand how to convert -the artifacts of certain builders into proper boxes for their -respective providers. +Because Vagrant boxes are +[provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), the Vagrant +post-processor is hardcoded to understand how to convert the artifacts of +certain builders into proper boxes for their respective providers. Currently, the Vagrant post-processor can create boxes for the following providers. -* AWS -* DigitalOcean -* Hyper-V -* Parallels -* QEMU -* VirtualBox -* VMware +- AWS +- DigitalOcean +- Hyper-V +- Parallels +- QEMU +- VirtualBox +- VMware --> **Support for additional providers** is planned. If the -Vagrant post-processor doesn't support creating boxes for a provider you -care about, please help by contributing to Packer and adding support for it. +-> **Support for additional providers** is planned. If the Vagrant +post-processor doesn't support creating boxes for a provider you care about, +please help by contributing to Packer and adding support for it. ## Configuration The simplest way to use the post-processor is to just enable it. No -configuration is required by default. This will mostly do what you expect -and will build functioning boxes for many of the built-in builders of -Packer. +configuration is required by default. This will mostly do what you expect and +will build functioning boxes for many of the built-in builders of Packer. -However, if you want to configure things a bit more, the post-processor -does expose some configuration options. The available options are listed -below, with more details about certain options in following sections. +However, if you want to configure things a bit more, the post-processor does +expose some configuration options. The available options are listed below, with +more details about certain options in following sections. -* `compression_level` (integer) - An integer representing the - compression level to use when creating the Vagrant box. Valid - values range from 0 to 9, with 0 being no compression and 9 being - the best compression. By default, compression is enabled at level 6. +- `compression_level` (integer) - An integer representing the compression level + to use when creating the Vagrant box. Valid values range from 0 to 9, with 0 + being no compression and 9 being the best compression. By default, compression + is enabled at level 6. -* `include` (array of strings) - Paths to files to include in the - Vagrant box. These files will each be copied into the top level directory - of the Vagrant box (regardless of their paths). They can then be used - from the Vagrantfile. +- `include` (array of strings) - Paths to files to include in the Vagrant box. + These files will each be copied into the top level directory of the Vagrant + box (regardless of their paths). They can then be used from the Vagrantfile. -* `keep_input_artifact` (boolean) - If set to true, do not delete the +- `keep_input_artifact` (boolean) - If set to true, do not delete the `output_directory` on a successful build. Defaults to false. -* `output` (string) - The full path to the box file that will be created - by this post-processor. This is a - [configuration template](/docs/templates/configuration-templates.html). - The variable `Provider` is replaced by the Vagrant provider the box is for. - The variable `ArtifactId` is replaced by the ID of the input artifact. 
- The variable `BuildName` is replaced with the name of the build. - By default, the value of this config is `packer_{{.BuildName}}_{{.Provider}}.box`. +- `output` (string) - The full path to the box file that will be created by + this post-processor. This is a [configuration + template](/docs/templates/configuration-templates.html). The variable + `Provider` is replaced by the Vagrant provider the box is for. The variable + `ArtifactId` is replaced by the ID of the input artifact. The variable + `BuildName` is replaced with the name of the build. By default, the value of + this config is `packer_{{.BuildName}}_{{.Provider}}.box`. -* `vagrantfile_template` (string) - Path to a template to use for the +- `vagrantfile_template` (string) - Path to a template to use for the Vagrantfile that is packaged with the box. ## Provider-Specific Overrides -If you have a Packer template with multiple builder types within it, -you may want to configure the box creation for each type a little differently. -For example, the contents of the Vagrantfile for a Vagrant box for AWS might -be different from the contents of the Vagrantfile you want for VMware. -The post-processor lets you do this. +If you have a Packer template with multiple builder types within it, you may +want to configure the box creation for each type a little differently. For +example, the contents of the Vagrantfile for a Vagrant box for AWS might be +different from the contents of the Vagrantfile you want for VMware. The +post-processor lets you do this. Specify overrides within the `override` configuration by provider name: -```javascript +``` {.javascript} { "type": "vagrant", "compression_level": 1, @@ -97,18 +96,18 @@ Specify overrides within the `override` configuration by provider name: } ``` -In the example above, the compression level will be set to 1 except for -VMware, where it will be set to 0. +In the example above, the compression level will be set to 1 except for VMware, +where it will be set to 0. -The available provider names are: `aws`, `digitalocean`, `virtualbox`, -`vmware`, and `parallels`. +The available provider names are: `aws`, `digitalocean`, `virtualbox`, `vmware`, +and `parallels`. ## Input Artifacts -By default, Packer will delete the original input artifact, assuming -you only want the final Vagrant box as the result. If you wish to keep the -input artifact (the raw virtual machine, for example), then you must -configure Packer to keep it. +By default, Packer will delete the original input artifact, assuming you only +want the final Vagrant box as the result. If you wish to keep the input artifact +(the raw virtual machine, for example), then you must configure Packer to keep +it. -Please see the [documentation on input artifacts](/docs/templates/post-processors.html#toc_2) -for more information. +Please see the [documentation on input +artifacts](/docs/templates/post-processors.html#toc_2) for more information. diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown index ca3f3f54a..f0fd9588e 100644 --- a/website/source/docs/post-processors/vsphere.html.markdown +++ b/website/source/docs/post-processors/vsphere.html.markdown @@ -1,16 +1,17 @@ --- -layout: "docs" -page_title: "vSphere Post-Processor" -description: |- - The Packer vSphere post-processor takes an artifact from the VMware builder and uploads it to a vSphere endpoint. 
---- +description: | + The Packer vSphere post-processor takes an artifact from the VMware builder and + uploads it to a vSphere endpoint. +layout: docs +page_title: 'vSphere Post-Processor' +... # vSphere Post-Processor Type: `vsphere` -The Packer vSphere post-processor takes an artifact from the VMware builder -and uploads it to a vSphere endpoint. +The Packer vSphere post-processor takes an artifact from the VMware builder and +uploads it to a vSphere endpoint. ## Configuration @@ -20,37 +21,35 @@ each category, the available configuration keys are alphabetized. Required: -* `cluster` (string) - The cluster to upload the VM to. +- `cluster` (string) - The cluster to upload the VM to. -* `datacenter` (string) - The name of the datacenter within vSphere to - add the VM to. +- `datacenter` (string) - The name of the datacenter within vSphere to add the + VM to. -* `datastore` (string) - The name of the datastore to store this VM. - This is _not required_ if `resource_pool` is specified. +- `datastore` (string) - The name of the datastore to store this VM. This is + *not required* if `resource_pool` is specified. -* `host` (string) - The vSphere host that will be contacted to perform - the VM upload. +- `host` (string) - The vSphere host that will be contacted to perform the + VM upload. -* `password` (string) - Password to use to authenticate to the vSphere - endpoint. +- `password` (string) - Password to use to authenticate to the vSphere endpoint. -* `resource_pool` (string) - The resource pool to upload the VM to. - This is _not required_. +- `resource_pool` (string) - The resource pool to upload the VM to. This is *not + required*. -* `username` (string) - The username to use to authenticate to the vSphere - endpoint. +- `username` (string) - The username to use to authenticate to the + vSphere endpoint. -* `vm_name` (string) - The name of the VM once it is uploaded. +- `vm_name` (string) - The name of the VM once it is uploaded. Optional: -* `disk_mode` (string) - Target disk format. See `ovftool` manual for +- `disk_mode` (string) - Target disk format. See `ovftool` manual for available options. By default, "thick" will be used. -* `insecure` (boolean) - Whether or not the connection to vSphere can be done +- `insecure` (boolean) - Whether or not the connection to vSphere can be done over an insecure connection. By default this is false. -* `vm_folder` (string) - The folder within the datastore to store the VM. +- `vm_folder` (string) - The folder within the datastore to store the VM. -* `vm_network` (string) - The name of the VM network this VM will be - added to. +- `vm_network` (string) - The name of the VM network this VM will be added to. diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index a2550b7bd..5682043c9 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -1,23 +1,28 @@ --- -layout: "docs" -page_title: "Ansible (Local) Provisioner" -description: |- - The `ansible-local` Packer provisioner configures Ansible to run on the machine by Packer from local Playbook and Role files. Playbooks and Roles can be uploaded from your local machine to the remote machine. Ansible is run in local mode via the `ansible-playbook` command. ---- +description: | + The `ansible-local` Packer provisioner configures Ansible to run on the machine + by Packer from local Playbook and Role files. 
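Pulling the vSphere settings above together, a complete post-processor block using only the documented required keys might look like this sketch (every value is a placeholder):

``` {.javascript}
{
  "type": "vsphere",
  "cluster": "CLUSTER_NAME",
  "datacenter": "DATACENTER_NAME",
  "datastore": "DATASTORE_NAME",
  "host": "VSPHERE_HOST",
  "username": "USERNAME_HERE",
  "password": "PASSWORD_HERE",
  "vm_name": "PACKER_VM_NAME"
}
```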
Playbooks and Roles can be + uploaded from your local machine to the remote machine. Ansible is run in local + mode via the `ansible-playbook` command. +layout: docs +page_title: 'Ansible (Local) Provisioner' +... # Ansible Local Provisioner Type: `ansible-local` -The `ansible-local` Packer provisioner configures Ansible to run on the machine by -Packer from local Playbook and Role files. Playbooks and Roles can be uploaded -from your local machine to the remote machine. Ansible is run in [local mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the `ansible-playbook` command. +The `ansible-local` Packer provisioner configures Ansible to run on the machine +by Packer from local Playbook and Role files. Playbooks and Roles can be +uploaded from your local machine to the remote machine. Ansible is run in [local +mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the +`ansible-playbook` command. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "ansible-local", "playbook_file": "local.yml" @@ -30,39 +35,40 @@ The reference of available configuration options is listed below. Required: -* `playbook_file` (string) - The playbook file to be executed by ansible. - This file must exist on your local system and will be uploaded to the +- `playbook_file` (string) - The playbook file to be executed by ansible. This + file must exist on your local system and will be uploaded to the remote machine. Optional: -* `command` (string) - The command to invoke ansible. Defaults to "ansible-playbook". +- `command` (string) - The command to invoke ansible. Defaults + to "ansible-playbook". -* `extra_arguments` (array of strings) - An array of extra arguments to pass to the - ansible command. By default, this is empty. +- `extra_arguments` (array of strings) - An array of extra arguments to pass to + the ansible command. By default, this is empty. -* `inventory_groups` (string) - A comma-separated list of groups to which - packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` - will generate an Ansible inventory like: +- `inventory_groups` (string) - A comma-separated list of groups to which packer + will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` will + generate an Ansible inventory like: - ```text + ``` {.text} [my_group_1] 127.0.0.1 [my_group_2] 127.0.0.1 ``` -* `inventory_file` (string) - The inventory file to be used by ansible. - This file must exist on your local system and will be uploaded to the +- `inventory_file` (string) - The inventory file to be used by ansible. This + file must exist on your local system and will be uploaded to the remote machine. - When using an inventory file, it's also required to `--limit` the hosts to - the specified host you're buiding. The `--limit` argument can be provided in - the `extra_arguments` option. + When using an inventory file, it's also required to `--limit` the hosts to the + specified host you're building. The `--limit` argument can be provided in the + `extra_arguments` option. An example inventory file may look like: - ```text + ``` {.text} [chi-dbservers] db-01 ansible_connection=local db-02 ansible_connection=local @@ -82,29 +88,30 @@ Optional: chi-appservers ``` -* `playbook_dir` (string) - a path to the complete ansible directory - structure on your local system to be copied to the remote machine - as the `staging_directory` before all other files and directories.
+- `playbook_dir` (string) - a path to the complete ansible directory structure + on your local system to be copied to the remote machine as the + `staging_directory` before all other files and directories. -* `playbook_paths` (array of strings) - An array of paths to playbook files on +- `playbook_paths` (array of strings) - An array of paths to playbook files on your local system. These will be uploaded to the remote machine under `staging_directory`/playbooks. By default, this is empty. -* `group_vars` (string) - a path to the directory containing ansible - group variables on your local system to be copied to the - remote machine. By default, this is empty. +- `group_vars` (string) - a path to the directory containing ansible group + variables on your local system to be copied to the remote machine. By default, + this is empty. -* `host_vars` (string) - a path to the directory containing ansible - host variables on your local system to be copied to the - remote machine. By default, this is empty. +- `host_vars` (string) - a path to the directory containing ansible host + variables on your local system to be copied to the remote machine. By default, + this is empty. -* `role_paths` (array of strings) - An array of paths to role directories on +- `role_paths` (array of strings) - An array of paths to role directories on your local system. These will be uploaded to the remote machine under `staging_directory`/roles. By default, this is empty. -* `staging_directory` (string) - The directory where all the configuration of - Ansible by Packer will be placed. By default this is "/tmp/packer-provisioner-ansible-local". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner prior - to this to configure it properly. +- `staging_directory` (string) - The directory where all the configuration of + Ansible by Packer will be placed. By default this + is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to + exist but must have proper permissions so that the SSH user that Packer uses + is able to create directories and write into this folder. If the permissions + are not correct, use a shell provisioner prior to this to configure + it properly. diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 3e56eecb2..81d097b7e 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -1,115 +1,120 @@ --- -layout: "docs" -page_title: "Chef-Client Provisioner" -description: |- - The Chef Client Packer provisioner installs and configures software on machines built by Packer using chef-client. Packer configures a Chef client to talk to a remote Chef Server to provision the machine. ---- +description: | + The Chef Client Packer provisioner installs and configures software on machines + built by Packer using chef-client. Packer configures a Chef client to talk to a + remote Chef Server to provision the machine. +layout: docs +page_title: 'Chef-Client Provisioner' +... # Chef Client Provisioner Type: `chef-client` -The Chef Client Packer provisioner installs and configures software on machines built -by Packer using [chef-client](http://docs.opscode.com/chef_client.html). 
-Packer configures a Chef client to talk to a remote Chef Server to -provision the machine. +The Chef Client Packer provisioner installs and configures software on machines +built by Packer using [chef-client](http://docs.opscode.com/chef_client.html). +Packer configures a Chef client to talk to a remote Chef Server to provision the +machine. The provisioner will even install Chef onto your machine if it isn't already installed, using the official Chef installers provided by Opscode. ## Basic Example -The example below is fully functional. It will install Chef onto the -remote machine and run Chef client. +The example below is fully functional. It will install Chef onto the remote +machine and run Chef client. -```javascript +``` {.javascript} { "type": "chef-client", "server_url": "https://mychefserver.com/" } ``` -Note: to properly clean up the Chef node and client the machine on which -packer is running must have knife on the path and configured globally, -i.e, ~/.chef/knife.rb must be present and configured for the target chef server +Note: to properly clean up the Chef node and client, the machine on which packer +is running must have knife on the path and configured globally, i.e., +~/.chef/knife.rb must be present and configured for the target chef server ## Configuration Reference The reference of available configuration options is listed below. No configuration is actually required. -* `chef_environment` (string) - The name of the chef_environment sent to the +- `chef_environment` (string) - The name of the chef_environment sent to the Chef server. By default this is empty and will not use an environment. -* `config_template` (string) - Path to a template that will be used for - the Chef configuration file. By default Packer only sets configuration - it needs to match the settings set in the provisioner configuration. If - you need to set configurations that the Packer provisioner doesn't support, - then you should use a custom configuration template. See the dedicated - "Chef Configuration" section below for more details. +- `config_template` (string) - Path to a template that will be used for the Chef + configuration file. By default Packer only sets configuration it needs to + match the settings set in the provisioner configuration. If you need to set + configurations that the Packer provisioner doesn't support, then you should + use a custom configuration template. See the dedicated "Chef Configuration" + section below for more details. -* `execute_command` (string) - The command used to execute Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `install_command` (string) - The command used to install Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `json` (object) - An arbitrary mapping of JSON that will be available as - node attributes while running Chef.
+- `json` (object) - An arbitrary mapping of JSON that will be available as node + attributes while running Chef. -* `node_name` (string) - The name of the node to register with the Chef - Server. This is optional and by default is packer-{{uuid}}. +- `node_name` (string) - The name of the node to register with the Chef Server. + This is optional and by default is packer-{{uuid}}. -* `prevent_sudo` (boolean) - By default, the configured commands that are +- `prevent_sudo` (boolean) - By default, the configured commands that are executed to install and run Chef are executed with `sudo`. If this is true, then the sudo will be omitted. -* `run_list` (array of strings) - The [run list](http://docs.opscode.com/essentials_node_object_run_lists.html) - for Chef. By default this is empty, and will use the run list sent - down by the Chef Server. +- `run_list` (array of strings) - The [run + list](http://docs.opscode.com/essentials_node_object_run_lists.html) for Chef. + By default this is empty, and will use the run list sent down by the + Chef Server. -* `server_url` (string) - The URL to the Chef server. This is required. +- `server_url` (string) - The URL to the Chef server. This is required. -* `skip_clean_client` (boolean) - If true, Packer won't remove the client - from the Chef server after it is done running. By default, this is false. +- `skip_clean_client` (boolean) - If true, Packer won't remove the client from + the Chef server after it is done running. By default, this is false. -* `skip_clean_node` (boolean) - If true, Packer won't remove the node - from the Chef server after it is done running. By default, this is false. +- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the + Chef server after it is done running. By default, this is false. -* `skip_install` (boolean) - If true, Chef will not automatically be installed +- `skip_install` (boolean) - If true, Chef will not automatically be installed on the machine using the Opscode omnibus installers. -* `staging_directory` (string) - This is the directory where all the configuration - of Chef by Packer will be placed. By default this is "/tmp/packer-chef-client". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-client". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -* `client_key` (string) - Path to client key. If not set, this defaults to a file - named client.pem in `staging_directory`. +- `client_key` (string) - Path to client key. If not set, this defaults to a + file named client.pem in `staging_directory`. -* `validation_client_name` (string) - Name of the validation client. If - not set, this won't be set in the configuration and the default that Chef - uses will be used. +- `validation_client_name` (string) - Name of the validation client. If not set, + this won't be set in the configuration and the default that Chef uses will + be used. 
-* `validation_key_path` (string) - Path to the validation key for communicating
-  with the Chef Server. This will be uploaded to the remote machine. If this
-  is NOT set, then it is your responsibility via other means (shell provisioner,
-  etc.) to get a validation key to where Chef expects it.
+- `validation_key_path` (string) - Path to the validation key for communicating
+  with the Chef Server. This will be uploaded to the remote machine. If this is
+  NOT set, then it is your responsibility via other means (shell
+  provisioner, etc.) to get a validation key to where Chef expects it.

 ## Chef Configuration

-By default, Packer uses a simple Chef configuration file in order to set
-the options specified for the provisioner. But Chef is a complex tool that
-supports many configuration options. Packer allows you to specify a custom
-configuration template if you'd like to set custom configurations.
+By default, Packer uses a simple Chef configuration file in order to set the
+options specified for the provisioner. But Chef is a complex tool that supports
+many configuration options. Packer allows you to specify a custom configuration
+template if you'd like to set custom configurations.

 The default value for the configuration template is:

-```liquid
+``` {.liquid}
 log_level        :info
 log_location     STDOUT
 chef_server_url  "{{.ServerUrl}}"
@@ -126,42 +131,42 @@ node_name "{{.NodeName}}"
 {{end}}
 ```

-This template is a [configuration template](/docs/templates/configuration-templates.html)
-and has a set of variables available to use:
+This template is a [configuration
+template](/docs/templates/configuration-templates.html) and has a set of
+variables available to use:

-* `NodeName` - The node name set in the configuration.
-* `ServerUrl` - The URL of the Chef Server set in the configuration.
-* `ValidationKeyPath` - Path to the validation key, if it is set.
+- `NodeName` - The node name set in the configuration.
+- `ServerUrl` - The URL of the Chef Server set in the configuration.
+- `ValidationKeyPath` - Path to the validation key, if it is set.

 ## Execute Command

-By default, Packer uses the following command (broken across multiple lines
-for readability) to execute Chef:
+By default, Packer uses the following command (broken across multiple lines for
+readability) to execute Chef:

-```liquid
+``` {.liquid}
 {{if .Sudo}}sudo {{end}}chef-client \
   --no-color \
   -c {{.ConfigPath}} \
   -j {{.JsonPath}}
 ```

-This command can be customized using the `execute_command` configuration.
-As you can see from the default value above, the value of this configuration
-can contain various template variables, defined below:
+This command can be customized using the `execute_command` configuration. As you
+can see from the default value above, the value of this configuration can
+contain various template variables, defined below:

-* `ConfigPath` - The path to the Chef configuration file.
-  file.
-* `JsonPath` - The path to the JSON attributes file for the node.
-* `Sudo` - A boolean of whether to `sudo` the command or not, depending on
-  the value of the `prevent_sudo` configuration.
+- `ConfigPath` - The path to the Chef configuration file.
+- `JsonPath` - The path to the JSON attributes file for the node.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.

 ## Install Command

-By default, Packer uses the following command (broken across multiple lines
-for readability) to install Chef.
This command can be customized if you want -to install Chef in another way. +By default, Packer uses the following command (broken across multiple lines for +readability) to install Chef. This command can be customized if you want to +install Chef in another way. -```text +``` {.text} curl -L https://www.opscode.com/chef/install.sh | \ {{if .Sudo}}sudo{{end}} bash ``` @@ -170,9 +175,8 @@ This command can be customized using the `install_command` configuration. ## Folder Permissions -!> The `chef-client` provisioner will chmod the directory with your Chef -keys to 777. This is to ensure that Packer can upload and make use of that -directory. However, once the machine is created, you usually don't -want to keep these directories with those permissions. To change the -permissions on the directories, append a shell provisioner after Chef -to modify them. +!> The `chef-client` provisioner will chmod the directory with your Chef keys +to 777. This is to ensure that Packer can upload and make use of that directory. +However, once the machine is created, you usually don't want to keep these +directories with those permissions. To change the permissions on the +directories, append a shell provisioner after Chef to modify them. diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown index 3a76c5514..03b55c066 100644 --- a/website/source/docs/provisioners/chef-solo.html.markdown +++ b/website/source/docs/provisioners/chef-solo.html.markdown @@ -1,28 +1,30 @@ --- -layout: "docs" -page_title: "Chef-Solo Provisioner" -description: |- - The Chef solo Packer provisioner installs and configures software on machines built by Packer using chef-solo. Cookbooks can be uploaded from your local machine to the remote machine or remote paths can be used. ---- +description: | + The Chef solo Packer provisioner installs and configures software on machines + built by Packer using chef-solo. Cookbooks can be uploaded from your local + machine to the remote machine or remote paths can be used. +layout: docs +page_title: 'Chef-Solo Provisioner' +... # Chef Solo Provisioner Type: `chef-solo` -The Chef solo Packer provisioner installs and configures software on machines built -by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). Cookbooks -can be uploaded from your local machine to the remote machine or remote paths -can be used. +The Chef solo Packer provisioner installs and configures software on machines +built by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). +Cookbooks can be uploaded from your local machine to the remote machine or +remote paths can be used. The provisioner will even install Chef onto your machine if it isn't already installed, using the official Chef installers provided by Chef Inc. ## Basic Example -The example below is fully functional and expects cookbooks in the -"cookbooks" directory relative to your working directory. +The example below is fully functional and expects cookbooks in the "cookbooks" +directory relative to your working directory. -```javascript +``` {.javascript} { "type": "chef-solo", "cookbook_paths": ["cookbooks"] @@ -34,124 +36,126 @@ The example below is fully functional and expects cookbooks in the The reference of available configuration options is listed below. No configuration is actually required, but at least `run_list` is recommended. 
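+
+For example, a configuration that sets a run list alongside the cookbook paths
+might look like the following sketch; the cookbook and recipe names here are
+purely illustrative:
+
+``` {.javascript}
+{
+  "type": "chef-solo",
+  "cookbook_paths": ["cookbooks"],
+  "run_list": ["recipe[mycookbook::default]"]
+}
+```
+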
-* `chef_environment` (string) - The name of the `chef_environment` sent to the +- `chef_environment` (string) - The name of the `chef_environment` sent to the Chef server. By default this is empty and will not use an environment -* `config_template` (string) - Path to a template that will be used for - the Chef configuration file. By default Packer only sets configuration - it needs to match the settings set in the provisioner configuration. If - you need to set configurations that the Packer provisioner doesn't support, - then you should use a custom configuration template. See the dedicated - "Chef Configuration" section below for more details. +- `config_template` (string) - Path to a template that will be used for the Chef + configuration file. By default Packer only sets configuration it needs to + match the settings set in the provisioner configuration. If you need to set + configurations that the Packer provisioner doesn't support, then you should + use a custom configuration template. See the dedicated "Chef Configuration" + section below for more details. -* `cookbook_paths` (array of strings) - This is an array of paths to - "cookbooks" directories on your local filesystem. These will be uploaded - to the remote machine in the directory specified by the `staging_directory`. - By default, this is empty. +- `cookbook_paths` (array of strings) - This is an array of paths to "cookbooks" + directories on your local filesystem. These will be uploaded to the remote + machine in the directory specified by the `staging_directory`. By default, + this is empty. -* `data_bags_path` (string) - The path to the "data\_bags" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `data_bags_path` (string) - The path to the "data\_bags" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `encrypted_data_bag_secret_path` (string) - The path to the file containing - the secret for encrypted data bags. By default, this is empty, so no - secret will be available. +- `encrypted_data_bag_secret_path` (string) - The path to the file containing + the secret for encrypted data bags. By default, this is empty, so no secret + will be available. -* `environments_path` (string) - The path to the "environments" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `environments_path` (string) - The path to the "environments" directory on + your local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `execute_command` (string) - The command used to execute Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `install_command` (string) - The command used to install Chef. This has - various [configuration template variables](/docs/templates/configuration-templates.html) - available. See below for more information. 
+- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See below + for more information. -* `json` (object) - An arbitrary mapping of JSON that will be available as - node attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as node + attributes while running Chef. -* `prevent_sudo` (boolean) - By default, the configured commands that are +- `prevent_sudo` (boolean) - By default, the configured commands that are executed to install and run Chef are executed with `sudo`. If this is true, then the sudo will be omitted. -* `remote_cookbook_paths` (array of strings) - A list of paths on the remote +- `remote_cookbook_paths` (array of strings) - A list of paths on the remote machine where cookbooks will already exist. These may exist from a previous provisioner or step. If specified, Chef will be configured to look for cookbooks here. By default, this is empty. -* `roles_path` (string) - The path to the "roles" directory on your local filesystem. - These will be uploaded to the remote machine in the directory specified by the - `staging_directory`. By default, this is empty. +- `roles_path` (string) - The path to the "roles" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -* `run_list` (array of strings) - The [run list](https://docs.chef.io/run_lists.html) - for Chef. By default this is empty. +- `run_list` (array of strings) - The [run + list](https://docs.chef.io/run_lists.html) for Chef. By default this is empty. -* `skip_install` (boolean) - If true, Chef will not automatically be installed +- `skip_install` (boolean) - If true, Chef will not automatically be installed on the machine using the Chef omnibus installers. -* `staging_directory` (string) - This is the directory where all the configuration - of Chef by Packer will be placed. By default this is "/tmp/packer-chef-solo". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-solo". This directory doesn't need to exist but must have + proper permissions so that the SSH user that Packer uses is able to create + directories and write into this folder. If the permissions are not correct, + use a shell provisioner prior to this to configure it properly. ## Chef Configuration -By default, Packer uses a simple Chef configuration file in order to set -the options specified for the provisioner. But Chef is a complex tool that -supports many configuration options. Packer allows you to specify a custom -configuration template if you'd like to set custom configurations. +By default, Packer uses a simple Chef configuration file in order to set the +options specified for the provisioner. But Chef is a complex tool that supports +many configuration options. Packer allows you to specify a custom configuration +template if you'd like to set custom configurations. 
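+
+For instance, wiring in a custom template is just a matter of pointing
+`config_template` at a local file that holds your own version of the default
+template shown below; the template file name here is illustrative:
+
+``` {.javascript}
+{
+  "type": "chef-solo",
+  "cookbook_paths": ["cookbooks"],
+  "config_template": "solo.rb.template"
+}
+```
+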
 The default value for the configuration template is:

-```liquid
+``` {.liquid}
 cookbook_path [{{.CookbookPaths}}]
 ```

-This template is a [configuration template](/docs/templates/configuration-templates.html)
-and has a set of variables available to use:
+This template is a [configuration
+template](/docs/templates/configuration-templates.html) and has a set of
+variables available to use:

-* `ChefEnvironment` - The current enabled environment. Only non-empty
-  if the environment path is set.
-* `CookbookPaths` is the set of cookbook paths ready to embedded directly
-  into a Ruby array to configure Chef.
-* `DataBagsPath` is the path to the data bags folder.
-* `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret
-* `EnvironmentsPath` - The path to the environments folder.
-* `RolesPath` - The path to the roles folder.
+- `ChefEnvironment` - The current enabled environment. Only non-empty if the
+  environment path is set.
+- `CookbookPaths` - The set of cookbook paths ready to be embedded directly into
+  a Ruby array to configure Chef.
+- `DataBagsPath` - The path to the data bags folder.
+- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret.
+- `EnvironmentsPath` - The path to the environments folder.
+- `RolesPath` - The path to the roles folder.

 ## Execute Command

-By default, Packer uses the following command (broken across multiple lines
-for readability) to execute Chef:
+By default, Packer uses the following command (broken across multiple lines for
+readability) to execute Chef:

-```liquid
+``` {.liquid}
 {{if .Sudo}}sudo {{end}}chef-solo \
   --no-color \
   -c {{.ConfigPath}} \
   -j {{.JsonPath}}
 ```

-This command can be customized using the `execute_command` configuration.
-As you can see from the default value above, the value of this configuration
-can contain various template variables, defined below:
+This command can be customized using the `execute_command` configuration. As you
+can see from the default value above, the value of this configuration can
+contain various template variables, defined below:

-* `ConfigPath` - The path to the Chef configuration file.
-  file.
-* `JsonPath` - The path to the JSON attributes file for the node.
-* `Sudo` - A boolean of whether to `sudo` the command or not, depending on
-  the value of the `prevent_sudo` configuration.
+- `ConfigPath` - The path to the Chef configuration file.
+- `JsonPath` - The path to the JSON attributes file for the node.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.

 ## Install Command

-By default, Packer uses the following command (broken across multiple lines
-for readability) to install Chef. This command can be customized if you want
-to install Chef in another way.
+By default, Packer uses the following command (broken across multiple lines for
+readability) to install Chef. This command can be customized if you want to
+install Chef in another way.
-```text +``` {.text} curl -L https://www.chef.io/chef/install.sh | \ {{if .Sudo}}sudo{{end}} bash ``` diff --git a/website/source/docs/provisioners/custom.html.markdown b/website/source/docs/provisioners/custom.html.markdown index 08df184fd..673ff3441 100644 --- a/website/source/docs/provisioners/custom.html.markdown +++ b/website/source/docs/provisioners/custom.html.markdown @@ -1,13 +1,16 @@ --- -layout: "docs" -page_title: "Custom Provisioner" -description: |- - Packer is extensible, allowing you to write new provisioners without having to modify the core source code of Packer itself. Documentation for creating new provisioners is covered in the custom provisioners page of the Packer plugin section. ---- +description: | + Packer is extensible, allowing you to write new provisioners without having to + modify the core source code of Packer itself. Documentation for creating new + provisioners is covered in the custom provisioners page of the Packer plugin + section. +layout: docs +page_title: Custom Provisioner +... # Custom Provisioner Packer is extensible, allowing you to write new provisioners without having to -modify the core source code of Packer itself. Documentation for creating -new provisioners is covered in the [custom provisioners](/docs/extend/provisioner.html) -page of the Packer plugin section. +modify the core source code of Packer itself. Documentation for creating new +provisioners is covered in the [custom +provisioners](/docs/extend/provisioner.html) page of the Packer plugin section. diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 19fcce9be..3439b4dd6 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -1,24 +1,26 @@ --- -layout: "docs" -page_title: "File Provisioner" -description: |- - The file Packer provisioner uploads files to machines built by Packer. The recommended usage of the file provisioner is to use it to upload files, and then use shell provisioner to move them to the proper place, set permissions, etc. ---- +description: | + The file Packer provisioner uploads files to machines built by Packer. The + recommended usage of the file provisioner is to use it to upload files, and then + use shell provisioner to move them to the proper place, set permissions, etc. +layout: docs +page_title: File Provisioner +... # File Provisioner Type: `file` The file Packer provisioner uploads files to machines built by Packer. The -recommended usage of the file provisioner is to use it to upload files, -and then use [shell provisioner](/docs/provisioners/shell.html) to move -them to the proper place, set permissions, etc. +recommended usage of the file provisioner is to use it to upload files, and then +use [shell provisioner](/docs/provisioners/shell.html) to move them to the +proper place, set permissions, etc. The file provisioner can upload both single files and complete directories. ## Basic Example -```javascript +``` {.javascript} { "type": "file", "source": "app.tar.gz", @@ -30,42 +32,42 @@ The file provisioner can upload both single files and complete directories. The available configuration options are listed below. All elements are required. -* `source` (string) - The path to a local file or directory to upload to the - machine. The path can be absolute or relative. If it is relative, it is +- `source` (string) - The path to a local file or directory to upload to + the machine. 
The path can be absolute or relative. If it is relative, it is
   relative to the working directory when Packer is executed. If this is a
   directory, the existence of a trailing slash is important. Read below on
   uploading directories.

-* `destination` (string) - The path where the file will be uploaded to in the
-  machine. This value must be a writable location and any parent directories
+- `destination` (string) - The path where the file will be uploaded to in
+  the machine. This value must be a writable location and any parent directories
   must already exist.

-* `direction` (string) - The direction of the file transfer. This defaults
-  to "upload." If it is set to "download" then the file "source" in
-  the machine wll be downloaded locally to "destination"
+- `direction` (string) - The direction of the file transfer. This defaults to
+  "upload." If it is set to "download", then the file "source" in the machine
+  will be downloaded locally to "destination".

 ## Directory Uploads

-The file provisioner is also able to upload a complete directory to the
-remote machine. When uploading a directory, there are a few important things
-you should know.
+The file provisioner is also able to upload a complete directory to the remote
+machine. When uploading a directory, there are a few important things you should
+know.

-First, the destination directory must already exist. If you need to
-create it, use a shell provisioner just prior to the file provisioner
-in order to create the directory.
+First, the destination directory must already exist. If you need to create it,
+use a shell provisioner just prior to the file provisioner in order to create
+the directory.

 Next, the existence of a trailing slash on the source path will determine
-whether the directory name will be embedded within the destination, or
-whether the destination will be created. An example explains this best:
+whether the directory name will be embedded within the destination, or whether
+the destination will be created. An example explains this best:

-If the source is `/foo` (no trailing slash), and the destination is
-`/tmp`, then the contents of `/foo` on the local machine will be uploaded
-to `/tmp/foo` on the remote machine. The `foo` directory on the remote
-machine will be created by Packer.
+If the source is `/foo` (no trailing slash), and the destination is `/tmp`, then
+the contents of `/foo` on the local machine will be uploaded to `/tmp/foo` on
+the remote machine. The `foo` directory on the remote machine will be created by
+Packer.

-If the source, however, is `/foo/` (a trailing slash is present), and
-the destination is `/tmp`, then the contents of `/foo` will be uploaded
-into `/tmp` directly.
+If the source, however, is `/foo/` (a trailing slash is present), and the
+destination is `/tmp`, then the contents of `/foo` will be uploaded into `/tmp`
+directly.

-This behavior was adopted from the standard behavior of rsync. Note that
-under the covers, rsync may or may not be used.
+This behavior was adopted from the standard behavior of rsync. Note that under
+the covers, rsync may or may not be used.
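+
+For example, with the configuration sketch below (the directory names are
+illustrative), Packer would create `/tmp/scripts` on the remote machine and
+upload the contents of the local `scripts` directory into it:
+
+``` {.javascript}
+{
+  "type": "file",
+  "source": "scripts",
+  "destination": "/tmp"
+}
+```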
diff --git a/website/source/docs/provisioners/powershell.html.markdown b/website/source/docs/provisioners/powershell.html.markdown index 69cb90b9a..ebc56ec4c 100644 --- a/website/source/docs/provisioners/powershell.html.markdown +++ b/website/source/docs/provisioners/powershell.html.markdown @@ -1,9 +1,11 @@ --- -layout: "docs" -page_title: "PowerShell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. ---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: PowerShell Provisioner +... # PowerShell Provisioner @@ -16,7 +18,7 @@ It assumes that the communicator in use is WinRM. The example below is fully functional. -```javascript +``` {.javascript} { "type": "powershell", "inline": ["dir c:\\"] @@ -28,55 +30,54 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. -Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next and + so on. Inline scripts are the easiest way to pull off simple tasks within + the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in the machine. + This path can be absolute or relative. If it is relative, it is relative to + the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. The scripts +- `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. Each script is executed in isolation, so state such as variables from one script won't carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. 
Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to inject + prior to the execute\_command. The format should be `key=value`. Packer + injects some environmental variables by default into the environment, as well, + which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. - The value of this is treated as [configuration template](/docs/templates/configuration-templates.html). - There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. + The value of this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -* `elevated_user` and `elevated_password` (string) - If specified, - the PowerShell script will be run with elevated privileges using - the given Windows user. +- `elevated_user` and `elevated_password` (string) - If specified, the + PowerShell script will be run with elevated privileges using the given + Windows user. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a writable + location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* the + remote process. By default this is "5m" or 5 minutes. This setting exists in + order to deal with times when SSH may restart, such as a system reboot. Set + this to a higher value if reboots take a longer amount of time. -* `valid_exit_codes` (list of ints) - Valid exit codes for the script. - By default this is just 0. +- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By + default this is just 0. diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 8fd05e4f2..ac5f4f628 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -1,33 +1,38 @@ --- -layout: "docs" -page_title: "Puppet (Masterless) Provisioner" -description: |- - The masterless Puppet Packer provisioner configures Puppet to run on the machines by Packer from local modules and manifest files. Modules and manifests can be uploaded from your local machine to the remote machine or can simply use remote paths (perhaps obtained using something like the shell provisioner). 
Puppet is run in masterless mode, meaning it never communicates to a Puppet master.
----
+description: |
+    The masterless Puppet Packer provisioner configures Puppet to run on the
+    machines built by Packer from local modules and manifest files. Modules and
+    manifests can be uploaded from your local machine to the remote machine or can
+    simply use remote paths (perhaps obtained using something like the shell
+    provisioner). Puppet is run in masterless mode, meaning it never communicates
+    to a Puppet master.
+layout: docs
+page_title: 'Puppet (Masterless) Provisioner'
+...

 # Puppet (Masterless) Provisioner

 Type: `puppet-masterless`

-The masterless Puppet Packer provisioner configures Puppet to run on the machines
-by Packer from local modules and manifest files. Modules and manifests
-can be uploaded from your local machine to the remote machine or can simply
-use remote paths (perhaps obtained using something like the shell provisioner).
+The masterless Puppet Packer provisioner configures Puppet to run on the
+machines built by Packer from local modules and manifest files. Modules and
+manifests can be uploaded from your local machine to the remote machine or can
+simply use remote paths (perhaps obtained using something like the shell
+provisioner).
 Puppet is run in masterless mode, meaning it never communicates to a Puppet
 master.

--> **Note:** Puppet will _not_ be installed automatically
-by this provisioner. This provisioner expects that Puppet is already
-installed on the machine. It is common practice to use the
-[shell provisioner](/docs/provisioners/shell.html) before the
-Puppet provisioner to do this.
+-> **Note:** Puppet will *not* be installed automatically by this
+provisioner. This provisioner expects that Puppet is already installed on the
+machine. It is common practice to use the [shell
+provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do
+this.

 ## Basic Example

-The example below is fully functional and expects the configured manifest
-file to exist relative to your working directory:
+The example below is fully functional and expects the configured manifest file
+to exist relative to your working directory:

-```javascript
+``` {.javascript}
 {
   "type": "puppet-masterless",
   "manifest_file": "site.pp"
@@ -40,63 +45,66 @@ The reference of available configuration options is listed below.

 Required parameters:

-* `manifest_file` (string) - This is either a path to a puppet manifest (`.pp`
-  file) _or_ a directory containing multiple manifests that puppet will apply
-  (the ["main manifest"][1]). These file(s) must exist on your local system and
-  will be uploaded to the remote machine.
-
-  [1]: https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html
+- `manifest_file` (string) - This is either a path to a puppet manifest
+  (`.pp` file) *or* a directory containing multiple manifests that puppet will
+  apply (the ["main
+  manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)).
+  These file(s) must exist on your local system and will be uploaded to the
+  remote machine.

 Optional parameters:

-* `execute_command` (string) - The command used to execute Puppet. This has
-  various [configuration template variables](/docs/templates/configuration-templates.html)
-  available. See below for more information.
+- `execute_command` (string) - The command used to execute Puppet. This has
+  various [configuration template
+  variables](/docs/templates/configuration-templates.html) available. See below
+  for more information.
-* `facter` (object of key/value strings) - Additional +- `facter` (object of key/value strings) - Additional [facts](http://puppetlabs.com/puppet/related-projects/facter) to make available when Puppet is running. -* `hiera_config_path` (string) - The path to a local file with hiera +- `hiera_config_path` (string) - The path to a local file with hiera configuration to be uploaded to the remote machine. Hiera data directories must be uploaded using the file provisioner separately. -* `manifest_dir` (string) - The path to a local directory with manifests - to be uploaded to the remote machine. This is useful if your main - manifest file uses imports. This directory doesn't necessarily contain - the `manifest_file`. It is a separate directory that will be set as - the "manifestdir" setting on Puppet. +- `manifest_dir` (string) - The path to a local directory with manifests to be + uploaded to the remote machine. This is useful if your main manifest file + uses imports. This directory doesn't necessarily contain the `manifest_file`. + It is a separate directory that will be set as the "manifestdir" setting + on Puppet. - ~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. - This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you - have multiple manifests you should use `manifest_file` instead. + \~> `manifest_dir` is passed to `puppet apply` as the + `--manifestdir` option. This option was deprecated in puppet 3.6, and removed + in puppet 4.0. If you have multiple manifests you should use + `manifest_file` instead. -* `module_paths` (array of strings) - This is an array of paths to module - directories on your local filesystem. These will be uploaded to the remote - machine. By default, this is empty. +- `module_paths` (array of strings) - This is an array of paths to module + directories on your local filesystem. These will be uploaded to the + remote machine. By default, this is empty. -* `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -* `staging_directory` (string) - This is the directory where all the configuration - of Puppet by Packer will be placed. By default this is "/tmp/packer-puppet-masterless". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but + must have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -* `working_directory` (string) - This is the directory from which the puppet command - will be run. When using hiera with a relative path, this option allows to ensure - that the paths are working properly. If not specified, defaults to the value of - specified `staging_directory` (or its default value if not specified either). 
+- `working_directory` (string) - This is the directory from which the puppet
+  command will be run. When using hiera with a relative path, this option allows
+  you to ensure that the paths are working properly. If not specified, defaults
+  to the value of specified `staging_directory` (or its default value if not
+  specified either).

 ## Execute Command

-By default, Packer uses the following command (broken across multiple lines
-for readability) to execute Puppet:
+By default, Packer uses the following command (broken across multiple lines for
+readability) to execute Puppet:

-```liquid
+``` {.liquid}
 cd {{.WorkingDir}} && \
   {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \
   --verbose \
@@ -107,19 +115,19 @@ cd {{.WorkingDir}} && \
   {{.ManifestFile}}
 ```

-This command can be customized using the `execute_command` configuration.
-As you can see from the default value above, the value of this configuration
-can contain various template variables, defined below:
+This command can be customized using the `execute_command` configuration. As you
+can see from the default value above, the value of this configuration can
+contain various template variables, defined below:

-* `WorkingDir` - The path from which Puppet will be executed.
-* `FacterVars` - Shell-friendly string of environmental variables used
-  to set custom facts configured for this provisioner.
-* `HieraConfigPath` - The path to a hiera configuration file.
-* `ManifestFile` - The path on the remote machine to the manifest file
-  for Puppet to use.
-* `ModulePath` - The paths to the module directories.
-* `Sudo` - A boolean of whether to `sudo` the command or not, depending on
-  the value of the `prevent_sudo` configuration.
+- `WorkingDir` - The path from which Puppet will be executed.
+- `FacterVars` - Shell-friendly string of environmental variables used to set
+  custom facts configured for this provisioner.
+- `HieraConfigPath` - The path to a hiera configuration file.
+- `ManifestFile` - The path on the remote machine to the manifest file for
+  Puppet to use.
+- `ModulePath` - The paths to the module directories.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.

 ## Default Facts

 In addition to being able to specify custom Facter facts using the `facter`
 configuration, the provisioner automatically defines certain commonly useful
 facts:

-* `packer_build_name` is set to the name of the build that Packer is running.
+- `packer_build_name` is set to the name of the build that Packer is running.
   This is most useful when Packer is making multiple builds and you want to
   distinguish them in your Hiera hierarchy.

-* `packer_builder_type` is the type of the builder that was used to create the
+- `packer_builder_type` is the type of the builder that was used to create the
   machine that Puppet is running on. This is useful if you want to run only
   certain parts of your Puppet code on systems built with certain builders.

diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown
index 803ae22cf..32bcadbe8 100644
--- a/website/source/docs/provisioners/puppet-server.html.markdown
+++ b/website/source/docs/provisioners/puppet-server.html.markdown
@@ -1,29 +1,30 @@
 ---
-layout: "docs"
-page_title: "Puppet Server Provisioner"
-description: |-
-  The `puppet-server` Packer provisioner provisions Packer machines with Puppet by connecting to a Puppet master.
----
+description: |
+    The `puppet-server` Packer provisioner provisions Packer machines with Puppet by
+    connecting to a Puppet master.
+layout: docs
+page_title: Puppet Server Provisioner
+...

 # Puppet Server Provisioner

 Type: `puppet-server`

-The `puppet-server` Packer provisioner provisions Packer machines with Puppet
-by connecting to a Puppet master.
+The `puppet-server` Packer provisioner provisions Packer machines with Puppet by
+connecting to a Puppet master.

--> **Note:** Puppet will _not_ be installed automatically
-by this provisioner. This provisioner expects that Puppet is already
-installed on the machine. It is common practice to use the
-[shell provisioner](/docs/provisioners/shell.html) before the
-Puppet provisioner to do this.
+-> **Note:** Puppet will *not* be installed automatically by this
+provisioner. This provisioner expects that Puppet is already installed on the
+machine. It is common practice to use the [shell
+provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do
+this.

 ## Basic Example

-The example below is fully functional and expects a Puppet server to be accessible
-from your network.:
+The example below is fully functional and expects a Puppet server to be
+accessible from your network:

-```javascript
+``` {.javascript}
 {
   "type": "puppet-server",
   "options": "--test --pluginsync",
@@ -37,39 +38,39 @@ from your network.:

 The reference of available configuration options is listed below.

-The provisioner takes various options. None are strictly
-required. They are listed below:
+The provisioner takes various options. None are strictly required. They are
+listed below:

-* `client_cert_path` (string) - Path to the client certificate for the
-  node on your disk. This defaults to nothing, in which case a client
-  cert won't be uploaded.
+- `client_cert_path` (string) - Path to the client certificate for the node on
+  your disk. This defaults to nothing, in which case a client cert won't
+  be uploaded.

-* `client_private_key_path` (string) - Path to the client private key for
-  the node on your disk. This defaults to nothing, in which case a client
-  private key won't be uploaded.
+- `client_private_key_path` (string) - Path to the client private key for the
+  node on your disk. This defaults to nothing, in which case a client private
+  key won't be uploaded.

-* `facter` (object of key/value strings) - Additional Facter facts to make available to the
-  Puppet run.
+- `facter` (object of key/value strings) - Additional Facter facts to make
+  available to the Puppet run.

-* `ignore_exit_codes` (boolean) - If true, Packer will never consider the
+- `ignore_exit_codes` (boolean) - If true, Packer will never consider the
   provisioner a failure.

-* `options` (string) - Additional command line options to pass
-  to `puppet agent` when Puppet is ran.
+- `options` (string) - Additional command line options to pass to `puppet agent`
+  when Puppet is run.

-* `prevent_sudo` (boolean) - By default, the configured commands that are
-  executed to run Puppet are executed with `sudo`. If this is true,
-  then the sudo will be omitted.
+- `prevent_sudo` (boolean) - By default, the configured commands that are
+  executed to run Puppet are executed with `sudo`. If this is true, then the
+  sudo will be omitted.

-* `puppet_node` (string) - The name of the node. If this isn't set,
-  the fully qualified domain name will be used.
+- `puppet_node` (string) - The name of the node. If this isn't set, the fully
+  qualified domain name will be used.
-* `puppet_server` (string) - Hostname of the Puppet server. By default - "puppet" will be used. +- `puppet_server` (string) - Hostname of the Puppet server. By default "puppet" + will be used. -* `staging_directory` (string) - This is the directory where all the configuration - of Puppet by Packer will be placed. By default this is "/tmp/packer-puppet-server". - This directory doesn't need to exist but must have proper permissions so that - the SSH user that Packer uses is able to create directories and write into - this folder. If the permissions are not correct, use a shell provisioner - prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-server". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index a298bb28d..cc1ab1f7b 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -1,22 +1,23 @@ --- -layout: "docs" -page_title: "Salt (Masterless) Provisioner" -description: |- - The `salt-masterless` Packer provisioner provisions machines built by Packer using Salt states, without connecting to a Salt master. ---- +description: | + The `salt-masterless` Packer provisioner provisions machines built by Packer + using Salt states, without connecting to a Salt master. +layout: docs +page_title: 'Salt (Masterless) Provisioner' +... # Salt Masterless Provisioner Type: `salt-masterless` -The `salt-masterless` Packer provisioner provisions machines built by Packer using -[Salt](http://saltstack.com/) states, without connecting to a Salt master. +The `salt-masterless` Packer provisioner provisions machines built by Packer +using [Salt](http://saltstack.com/) states, without connecting to a Salt master. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "salt-masterless", "local_state_tree": "/Users/me/salt" @@ -25,31 +26,33 @@ The example below is fully functional. ## Configuration Reference -The reference of available configuration options is listed below. The only required argument is the path to your local salt state tree. +The reference of available configuration options is listed below. The only +required argument is the path to your local salt state tree. Optional: -* `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage - is somewhat documented on [github](https://github.com/saltstack/salt-bootstrap), - but the [script itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) +- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage + is somewhat documented on + [github](https://github.com/saltstack/salt-bootstrap), but the [script + itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh) has more detailed usage instructions. By default, no arguments are sent to the script. -* `local_pillar_roots` (string) - The path to your local - [pillar roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). 
+- `local_pillar_roots` (string) - The path to your local [pillar + roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). This will be uploaded to the `/srv/pillar` on the remote. -* `local_state_tree` (string) - The path to your local - [state tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). +- `local_state_tree` (string) - The path to your local [state + tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). This will be uploaded to the `/srv/salt` on the remote. -* `minion_config` (string) - The path to your local - [minion config](http://docs.saltstack.com/topics/configuration.html). - This will be uploaded to the `/etc/salt` on the remote. +- `minion_config` (string) - The path to your local [minion + config](http://docs.saltstack.com/topics/configuration.html). This will be + uploaded to the `/etc/salt` on the remote. -* `skip_bootstrap` (boolean) - By default the salt provisioner runs - [salt bootstrap](https://github.com/saltstack/salt-bootstrap) to install - salt. Set this to true to skip this step. +- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt + bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set + this to true to skip this step. -* `temp_config_dir` (string) - Where your local state tree will be copied - before moving to the `/srv/salt` directory. Default is `/tmp/salt`. +- `temp_config_dir` (string) - Where your local state tree will be copied before + moving to the `/srv/salt` directory. Default is `/tmp/salt`. diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index dec270841..97015a847 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -1,27 +1,29 @@ --- -layout: "docs" -page_title: "Shell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. ---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: Shell Provisioner +... # Shell Provisioner Type: `shell` -The shell Packer provisioner provisions machines built by Packer using shell scripts. -Shell provisioning is the easiest way to get software installed and configured -on a machine. +The shell Packer provisioner provisions machines built by Packer using shell +scripts. Shell provisioning is the easiest way to get software installed and +configured on a machine. --> **Building Windows images?** You probably want to use the -[PowerShell](/docs/provisioners/powershell.html) or -[Windows Shell](/docs/provisioners/windows-shell.html) provisioners. +-> **Building Windows images?** You probably want to use the +[PowerShell](/docs/provisioners/powershell.html) or [Windows +Shell](/docs/provisioners/windows-shell.html) provisioners. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "shell", "inline": ["echo foo"] @@ -33,83 +35,82 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. 
-Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next and + so on. Inline scripts are the easiest way to pull off simple tasks within + the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in the machine. + This path can be absolute or relative. If it is relative, it is relative to + the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. The scripts +- `scripts` (array of strings) - An array of scripts to execute. The scripts will be uploaded and executed in the order specified. Each script is executed in isolation, so state such as variables from one script won't carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to inject + prior to the execute\_command. The format should be `key=value`. Packer + injects some environmental variables by default into the environment, as well, + which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of + this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. 
-* `inline_shebang` (string) - The +- `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when - running commands specified by `inline`. By default, this is `/bin/sh -e`. - If you're not using `inline`, then this configuration has no effect. - **Important:** If you customize this, be sure to include something like - the `-e` flag, otherwise individual steps failing won't fail the provisioner. + running commands specified by `inline`. By default, this is `/bin/sh -e`. If + you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like the + `-e` flag, otherwise individual steps failing won't fail the provisioner. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a writable + location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* the + remote process. By default this is "5m" or 5 minutes. This setting exists in + order to deal with times when SSH may restart, such as a system reboot. Set + this to a higher value if reboots take a longer amount of time. ## Execute Command Example -To many new users, the `execute_command` is puzzling. However, it provides -an important function: customization of how the command is executed. The -most common use case for this is dealing with **sudo password prompts**. You may -also need to customize this if you use a non-POSIX shell, such as `tcsh` on -FreeBSD. +To many new users, the `execute_command` is puzzling. However, it provides an +important function: customization of how the command is executed. The most +common use case for this is dealing with **sudo password prompts**. You may also +need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD. ### Sudo Example -Some operating systems default to a non-root user. For example if you login -as `ubuntu` and can sudo using the password `packer`, then you'll want to -change `execute_command` to be: +Some operating systems default to a non-root user. For example if you login as +`ubuntu` and can sudo using the password `packer`, then you'll want to change +`execute_command` to be: -```text +``` {.text} "echo 'packer' | {{ .Vars }} sudo -E -S sh '{{ .Path }}'" ``` -The `-S` flag tells `sudo` to read the password from stdin, which in this -case is being piped in with the value of `packer`. The `-E` flag tells `sudo` -to preserve the environment, allowing our environmental variables to work -within the script. +The `-S` flag tells `sudo` to read the password from stdin, which in this case +is being piped in with the value of `packer`. The `-E` flag tells `sudo` to +preserve the environment, allowing our environmental variables to work within +the script. -By setting the `execute_command` to this, your script(s) can run with -root privileges without worrying about password prompts. 
+By setting the `execute_command` to this, your script(s) can run with root +privileges without worrying about password prompts. ### FreeBSD Example @@ -123,44 +124,44 @@ Note the addition of `env` before `{{ .Vars }}`. ## Default Environmental Variables -In addition to being able to specify custom environmental variables using -the `environment_vars` configuration, the provisioner automatically -defines certain commonly useful environmental variables: +In addition to being able to specify custom environmental variables using the +`environment_vars` configuration, the provisioner automatically defines certain +commonly useful environmental variables: -* `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. +- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. This is most useful when Packer is making multiple builds and you want to distinguish them slightly from a common provisioning script. -* `PACKER_BUILDER_TYPE` is the type of the builder that was used to create - the machine that the script is running on. This is useful if you want to - run only certain parts of the script on systems built with certain builders. +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the + machine that the script is running on. This is useful if you want to run only + certain parts of the script on systems built with certain builders. ## Handling Reboots Provisioning sometimes involves restarts, usually when updating the operating system. Packer is able to tolerate restarts via the shell provisioner. -Packer handles this by retrying to start scripts for a period of time -before failing. This allows time for the machine to start up and be ready -to run scripts. The amount of time the provisioner will wait is configured -using `start_retry_timeout`, which defaults to a few minutes. +Packer handles this by retrying to start scripts for a period of time before +failing. This allows time for the machine to start up and be ready to run +scripts. The amount of time the provisioner will wait is configured using +`start_retry_timeout`, which defaults to a few minutes. -Sometimes, when executing a command like `reboot`, the shell script will -return and Packer will start executing the next one before SSH actually -quits and the machine restarts. For this, put a long `sleep` after the -reboot so that SSH will eventually be killed automatically: +Sometimes, when executing a command like `reboot`, the shell script will return +and Packer will start executing the next one before SSH actually quits and the +machine restarts. For this, put a long `sleep` after the reboot so that SSH will +eventually be killed automatically: -```text +``` {.text} reboot sleep 60 ``` -Some OS configurations don't properly kill all network connections on -reboot, causing the provisioner to hang despite a reboot occurring. -In this case, make sure you shut down the network interfaces -on reboot or in your shell script. For example, on Gentoo: +Some OS configurations don't properly kill all network connections on reboot, +causing the provisioner to hang despite a reboot occurring. In this case, make +sure you shut down the network interfaces on reboot or in your shell script. For +example, on Gentoo: -```text +``` {.text} /etc/init.d/net.eth0 stop ``` @@ -170,59 +171,58 @@ Some provisioning requires connecting to remote SSH servers from within the packer instance. The below example is for pulling code from a private git repository utilizing openssh on the client. 
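+
+For reference, starting an agent and loading a key on the machine running
+Packer typically looks like the following sketch (`/path/to/key` is a
+placeholder for your actual key file):
+
+``` {.text}
+$ eval $(ssh-agent)
+$ ssh-add /path/to/key
+```
+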
Make sure you are running `ssh-agent` and add your git repo ssh keys into it using `ssh-add /path/to/key`. -When the packer instance needs access to the ssh keys the agent will forward -the request back to your `ssh-agent`. +When the packer instance needs access to the ssh keys the agent will forward the +request back to your `ssh-agent`. -Note: when provisioning via git you should add the git server keys into -the `~/.ssh/known_hosts` file otherwise the git command could hang awaiting -input. This can be done by copying the file in via the -[file provisioner](/docs/provisioners/file.html) (more secure) -or using `ssh-keyscan` to populate the file (less secure). An example of the -latter accessing github would be: +Note: when provisioning via git you should add the git server keys into the +`~/.ssh/known_hosts` file otherwise the git command could hang awaiting input. +This can be done by copying the file in via the [file +provisioner](/docs/provisioners/file.html) (more secure) or using `ssh-keyscan` +to populate the file (less secure). An example of the latter accessing github +would be: -``` -{ - "type": "shell", - "inline": [ - "sudo apt-get install -y git", - "ssh-keyscan github.com >> ~/.ssh/known_hosts", - "git clone git@github.com:exampleorg/myprivaterepo.git" - ] -} -``` + { + "type": "shell", + "inline": [ + "sudo apt-get install -y git", + "ssh-keyscan github.com >> ~/.ssh/known_hosts", + "git clone git@github.com:exampleorg/myprivaterepo.git" + ] + } ## Troubleshooting *My shell script doesn't work correctly on Ubuntu* -* On Ubuntu, the `/bin/sh` shell is -[dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has -[bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in it, -then put `#!/bin/bash` at the top of your script. Differences -between dash and bash can be found on the [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. +- On Ubuntu, the `/bin/sh` shell is + [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has + [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in + it, then put `#!/bin/bash` at the top of your script. Differences between dash + and bash can be found on the + [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page. *My shell works when I login but fails with the shell provisioner* -* See the above tip. More than likely, your login shell is using `/bin/bash` -while the provisioner is using `/bin/sh`. +- See the above tip. More than likely, your login shell is using `/bin/bash` + while the provisioner is using `/bin/sh`. *My installs hang when using `apt-get` or `yum`* -* Make sure you add a `-y` to the command to prevent it from requiring -user input before proceeding. +- Make sure you add a `-y` to the command to prevent it from requiring user + input before proceeding. *How do I tell what my shell script is doing?* -* Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`) -will echo the script statements as it is executing. +- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`) + will echo the script statements as it is executing. *My builds don't always work the same* -* Some distributions start the SSH daemon before other core services which -can create race conditions. Your first provisioner can tell the machine to -wait until it completely boots. +- Some distributions start the SSH daemon before other core services which can + create race conditions. 
Your first provisioner can tell the machine to wait
+  until it completely boots.

-```javascript
+``` {.javascript}
 {
   "type": "shell",
   "inline": [ "sleep 10" ]
diff --git a/website/source/docs/templates/builders.html.markdown b/website/source/docs/templates/builders.html.markdown
index 2afb0a95c..594ed59e4 100644
--- a/website/source/docs/templates/builders.html.markdown
+++ b/website/source/docs/templates/builders.html.markdown
@@ -1,27 +1,28 @@
 ---
-layout: "docs"
-page_title: "Templates: Builders"
-description: |-
-  Within the template, the builders section contains an array of all the builders that Packer should use to generate a machine images for the template.
----
+description: |
+    Within the template, the builders section contains an array of all the builders
+    that Packer should use to generate machine images for the template.
+layout: docs
+page_title: 'Templates: Builders'
+...

 # Templates: Builders

-Within the template, the builders section contains an array of all the
-builders that Packer should use to generate a machine images for the template.
+Within the template, the builders section contains an array of all the builders
+that Packer should use to generate machine images for the template.

-Builders are responsible for creating machines and generating images from
-them for various platforms. For example, there are separate builders for
-EC2, VMware, VirtualBox, etc. Packer comes with many builders by default,
-and can also be extended to add new builders.
+Builders are responsible for creating machines and generating images from them
+for various platforms. For example, there are separate builders for EC2, VMware,
+VirtualBox, etc. Packer comes with many builders by default, and can also be
+extended to add new builders.

-This documentation page will cover how to configure a builder in a template.
-The specific configuration options available for each builder, however,
-must be referenced from the documentation for that specific builder.
+This documentation page will cover how to configure a builder in a template. The
+specific configuration options available for each builder, however, must be
+referenced from the documentation for that specific builder.

 Within a template, a section of builder definitions looks like this:

-```javascript
+``` {.javascript}
 {
   "builders": [
     // ... one or more builder definitions here
@@ -31,19 +32,19 @@ Within a template, a section of builder definitions looks like this:

 ## Builder Definition

-A single builder definition maps to exactly one [build](/docs/basics/terminology.html#term-build).
-A builder definition is a JSON object that requires at least a `type` key. The
-`type` is the name of the builder that will be used to create a machine image
-for the build.
+A single builder definition maps to exactly one
+[build](/docs/basics/terminology.html#term-build). A builder definition is a
+JSON object that requires at least a `type` key. The `type` is the name of the
+builder that will be used to create a machine image for the build.

-In addition to the `type`, other keys configure the builder itself. For
-example, the AWS builder requires an `access_key`, `secret_key`, and
-some other settings. These are placed directly within the builder definition.
+In addition to the `type`, other keys configure the builder itself. For example,
+the AWS builder requires an `access_key`, `secret_key`, and some other settings.
+These are placed directly within the builder definition.
-An example builder definition is shown below, in this case configuring -the AWS builder: +An example builder definition is shown below, in this case configuring the AWS +builder: -```javascript +``` {.javascript} { "type": "amazon-ebs", "access_key": "...", @@ -53,23 +54,22 @@ the AWS builder: ## Named Builds -Each build in Packer has a name. By default, the name is just the name -of the builder being used. In general, this is good enough. Names only serve -as an indicator in the output of what is happening. If you want, however, -you can specify a custom name using the `name` key within the builder definition. +Each build in Packer has a name. By default, the name is just the name of the +builder being used. In general, this is good enough. Names only serve as an +indicator in the output of what is happening. If you want, however, you can +specify a custom name using the `name` key within the builder definition. -This is particularly useful if you have multiple builds defined that use -the same underlying builder. In this case, you must specify a name for at least -one of them since the names must be unique. +This is particularly useful if you have multiple builds defined that use the +same underlying builder. In this case, you must specify a name for at least one +of them since the names must be unique. ## Communicators Every build is associated with a single -[communicator](/docs/templates/communicator.html). Communicators are -used to establish a connection for provisioning a remote machine (such -as an AWS instance or local virtual machine). +[communicator](/docs/templates/communicator.html). Communicators are used to +establish a connection for provisioning a remote machine (such as an AWS +instance or local virtual machine). -All the examples for the various builders show some communicator (usually -SSH), but the communicators are highly customizable so we recommend -reading the +All the examples for the various builders show some communicator (usually SSH), +but the communicators are highly customizable so we recommend reading the [communicator documentation](/docs/templates/communicator.html). diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown index cef1385d3..9bc8f835e 100644 --- a/website/source/docs/templates/configuration-templates.html.markdown +++ b/website/source/docs/templates/configuration-templates.html.markdown @@ -1,40 +1,42 @@ --- -layout: "docs" -page_title: "Configuration Templates" -description: |- - All strings within templates are processed by a common Packer templating engine, where variables and functions can be used to modify the value of a configuration parameter at runtime. ---- +description: | + All strings within templates are processed by a common Packer templating engine, + where variables and functions can be used to modify the value of a configuration + parameter at runtime. +layout: docs +page_title: Configuration Templates +... # Configuration Templates -All strings within templates are processed by a common Packer templating -engine, where variables and functions can be used to modify the value of -a configuration parameter at runtime. +All strings within templates are processed by a common Packer templating engine, +where variables and functions can be used to modify the value of a configuration +parameter at runtime. -For example, the `{{timestamp}}` function can be used in any string to -generate the current timestamp. 
This is useful for configurations that require -unique keys, such as AMI names. By setting the AMI name to something like +For example, the `{{timestamp}}` function can be used in any string to generate +the current timestamp. This is useful for configurations that require unique +keys, such as AMI names. By setting the AMI name to something like `My Packer AMI {{timestamp}}`, the AMI name will be unique down to the second. -In addition to globally available functions like timestamp shown before, -some configurations have special local variables that are available only -for that configuration. These are recognizable because they're prefixed by -a period, such as `{{.Name}}`. +In addition to globally available functions like timestamp shown before, some +configurations have special local variables that are available only for that +configuration. These are recognizable because they're prefixed by a period, such +as `{{.Name}}`. -The complete syntax is covered in the next section, followed by a reference -of globally available functions. +The complete syntax is covered in the next section, followed by a reference of +globally available functions. ## Syntax -The syntax of templates is extremely simple. Anything template related -happens within double-braces: `{{ }}`. Variables are prefixed with a period -and capitalized, such as `{{.Variable}}` and functions are just directly -within the braces, such as `{{timestamp}}`. +The syntax of templates is extremely simple. Anything template related happens +within double-braces: `{{ }}`. Variables are prefixed with a period and +capitalized, such as `{{.Variable}}` and functions are just directly within the +braces, such as `{{timestamp}}`. Here is an example from the VMware VMX template that shows configuration templates in action: -```liquid +``` {.liquid} .encoding = "UTF-8" displayName = "{{ .Name }}" guestOS = "{{ .GuestOS }}" @@ -43,7 +45,7 @@ guestOS = "{{ .GuestOS }}" In this case, the "Name" and "GuestOS" variables will be replaced, potentially resulting in a VMX that looks like this: -```liquid +``` {.liquid} .encoding = "UTF-8" displayName = "packer" guestOS = "otherlinux" @@ -52,70 +54,126 @@ guestOS = "otherlinux" ## Global Functions While some configuration settings have local variables specific to only that -configuration, a set of functions are available globally for use in _any string_ +configuration, a set of functions are available globally for use in *any string* in Packer templates. These are listed below for reference. -* `build_name` - The name of the build being run. -* `build_type` - The type of the builder being used currently. -* `isotime [FORMAT]` - UTC time, which can be [formatted](http://golang.org/pkg/time/#example_Time_Format). - See more examples below. -* `lower` - Lowercases the string. -* `pwd` - The working directory while executing Packer. -* `template_dir` - The directory to the template for the build. -* `timestamp` - The current Unix timestamp in UTC. -* `uuid` - Returns a random UUID. -* `upper` - Uppercases the string. +- `build_name` - The name of the build being run. +- `build_type` - The type of the builder being used currently. +- `isotime [FORMAT]` - UTC time, which can be + [formatted](http://golang.org/pkg/time/#example_Time_Format). See more + examples below. +- `lower` - Lowercases the string. +- `pwd` - The working directory while executing Packer. +- `template_dir` - The directory to the template for the build. +- `timestamp` - The current Unix timestamp in UTC. +- `uuid` - Returns a random UUID. 
+- `upper` - Uppercases the string. ### isotime Format -Formatting for the function `isotime` uses the magic reference date -**Mon Jan 2 15:04:05 -0700 MST 2006**, which breaks down to the following: +Formatting for the function `isotime` uses the magic reference date **Mon Jan 2 +15:04:05 -0700 MST 2006**, which breaks down to the following:
+
+|         | Day of Week  | Month         | Date | Hour    | Minute | Second | Year | Timezone |
+|---------|--------------|---------------|------|---------|--------|--------|------|----------|
+| Numeric | -            | 01            | 02   | 03 (15) | 04     | 05     | 06   | -0700    |
+| Textual | Monday (Mon) | January (Jan) | -    | -       | -      | -      | -    | MST      |
- _The values in parentheses are the abbreviated, or 24-hour clock values_
+*The values in parentheses are the abbreviated, or 24-hour clock values*

- Here are some example formated time, using the above format options:
+Here are some example formatted times, using the above format options:

-```liquid
+``` {.liquid}
isotime = June 7, 7:22:43pm 2014

{{isotime "2006-01-02"}} = 2014-06-07
@@ -126,7 +184,7 @@ isotime = June 7, 7:22:43pm 2014

Please note that double quote characters need escaping inside of templates:

-```javascript
+``` {.javascript}
{
  "builders": [
    {
@@ -147,6 +205,6 @@ Please note that double quote characters need escaping inside of templates:

Specific to Amazon builders:

-* ``clean_ami_name`` - AMI names can only contain certain characters. This
+- `clean_ami_name` - AMI names can only contain certain characters. This
  function will replace illegal characters with a '-' character. Example usage,
  since ":" is not a legal AMI name character, is: `{{isotime | clean_ami_name}}`.
diff --git a/website/source/docs/templates/introduction.html.markdown b/website/source/docs/templates/introduction.html.markdown
index 3dc363916..1d67ea196 100644
--- a/website/source/docs/templates/introduction.html.markdown
+++ b/website/source/docs/templates/introduction.html.markdown
@@ -1,21 +1,25 @@
 ---
-layout: "docs"
-page_title: "Templates"
-description: |-
-  Templates are JSON files that configure the various components of Packer in order to create one or more machine images. Templates are portable, static, and readable and writable by both humans and computers. This has the added benefit of being able to not only create and modify templates by hand, but also write scripts to dynamically create or modify templates.
----
+description: |
+    Templates are JSON files that configure the various components of Packer in
+    order to create one or more machine images. Templates are portable, static, and
+    readable and writable by both humans and computers. This has the added benefit
+    of being able to not only create and modify templates by hand, but also write
+    scripts to dynamically create or modify templates.
+layout: docs
+page_title: Templates
+...

 # Templates

-Templates are JSON files that configure the various components of Packer
-in order to create one or more machine images. Templates are portable, static,
-and readable and writable by both humans and computers. This has the added
-benefit of being able to not only create and modify templates by hand, but
-also write scripts to dynamically create or modify templates.
+Templates are JSON files that configure the various components of Packer in
+order to create one or more machine images. Templates are portable, static, and
+readable and writable by both humans and computers. This has the added benefit
+of being able to not only create and modify templates by hand, but also write
+scripts to dynamically create or modify templates.

-Templates are given to commands such as `packer build`, which will
-take the template and actually run the builds within it, producing
-any resulting machine images.
+Templates are given to commands such as `packer build`, which will take the
+template and actually run the builds within it, producing any resulting machine
+images.

 ## Template Structure

@@ -23,64 +27,64 @@ A template is a JSON object that has a set of keys configuring various
 components of Packer. The available keys within a template are listed below.
 Along with each key, it is noted whether it is required or not.
-* `builders` (_required_) is an array of one or more objects that defines - the builders that will be used to create machine images for this template, - and configures each of those builders. For more information on how to define - and configure a builder, read the sub-section on - [configuring builders in templates](/docs/templates/builders.html). +- `builders` (*required*) is an array of one or more objects that defines the + builders that will be used to create machine images for this template, and + configures each of those builders. For more information on how to define and + configure a builder, read the sub-section on [configuring builders in + templates](/docs/templates/builders.html). -* `description` (optional) is a string providing a description of what - the template does. This output is used only in the - [inspect command](/docs/command-line/inspect.html). +- `description` (optional) is a string providing a description of what the + template does. This output is used only in the [inspect + command](/docs/command-line/inspect.html). -* `min_packer_version` (optional) is a string that has a minimum Packer - version that is required to parse the template. This can be used to - ensure that proper versions of Packer are used with the template. A - max version can't be specified because Packer retains backwards - compatibility with `packer fix`. +- `min_packer_version` (optional) is a string that has a minimum Packer version + that is required to parse the template. This can be used to ensure that proper + versions of Packer are used with the template. A max version can't be + specified because Packer retains backwards compatibility with `packer fix`. -* `post-processors` (optional) is an array of one or more objects that defines the - various post-processing steps to take with the built images. If not specified, - then no post-processing will be done. For more - information on what post-processors do and how they're defined, read the - sub-section on [configuring post-processors in templates](/docs/templates/post-processors.html). +- `post-processors` (optional) is an array of one or more objects that defines + the various post-processing steps to take with the built images. If not + specified, then no post-processing will be done. For more information on what + post-processors do and how they're defined, read the sub-section on + [configuring post-processors in + templates](/docs/templates/post-processors.html). -* `provisioners` (optional) is an array of one or more objects that defines - the provisioners that will be used to install and configure software for - the machines created by each of the builders. If it is not specified, - then no provisioners will be run. For more - information on how to define and configure a provisioner, read the - sub-section on [configuring provisioners in templates](/docs/templates/provisioners.html). +- `provisioners` (optional) is an array of one or more objects that defines the + provisioners that will be used to install and configure software for the + machines created by each of the builders. If it is not specified, then no + provisioners will be run. For more information on how to define and configure + a provisioner, read the sub-section on [configuring provisioners in + templates](/docs/templates/provisioners.html). -* `variables` (optional) is an array of one or more key/value strings that defines - user variables contained in the template. - If it is not specified, then no variables are defined. 
- For more information on how to define and use user variables, read the - sub-section on [user variables in templates](/docs/templates/user-variables.html). +- `variables` (optional) is an array of one or more key/value strings that + defines user variables contained in the template. If it is not specified, then + no variables are defined. For more information on how to define and use user + variables, read the sub-section on [user variables in + templates](/docs/templates/user-variables.html). ## Comments JSON doesn't support comments and Packer reports unknown keys as validation -errors. If you'd like to comment your template, you can prefix a _root level_ +errors. If you'd like to comment your template, you can prefix a *root level* key with an underscore. Example: -```javascript +``` {.javascript} { "_comment": "This is a comment", "builders": [{}] } ``` -**Important:** Only _root level_ keys can be underscore prefixed. Keys within +**Important:** Only *root level* keys can be underscore prefixed. Keys within builders, provisioners, etc. will still result in validation errors. ## Example Template -Below is an example of a basic template that is nearly fully functional. It is just -missing valid AWS access keys. Otherwise, it would work properly with +Below is an example of a basic template that is nearly fully functional. It is +just missing valid AWS access keys. Otherwise, it would work properly with `packer build`. -```javascript +``` {.javascript} { "builders": [ { diff --git a/website/source/docs/templates/post-processors.html.markdown b/website/source/docs/templates/post-processors.html.markdown index 7a7ba4664..2c71e6664 100644 --- a/website/source/docs/templates/post-processors.html.markdown +++ b/website/source/docs/templates/post-processors.html.markdown @@ -1,27 +1,30 @@ --- -layout: "docs" -page_title: "Templates: Post-Processors" -description: |- - The post-processor section within a template configures any post-processing that will be done to images built by the builders. Examples of post-processing would be compressing files, uploading artifacts, etc. ---- +description: | + The post-processor section within a template configures any post-processing that + will be done to images built by the builders. Examples of post-processing would + be compressing files, uploading artifacts, etc. +layout: docs +page_title: 'Templates: Post-Processors' +... # Templates: Post-Processors -The post-processor section within a template configures any post-processing -that will be done to images built by the builders. Examples of post-processing -would be compressing files, uploading artifacts, etc. +The post-processor section within a template configures any post-processing that +will be done to images built by the builders. Examples of post-processing would +be compressing files, uploading artifacts, etc. -Post-processors are _optional_. If no post-processors are defined within a template, -then no post-processing will be done to the image. The resulting artifact of -a build is just the image outputted by the builder. +Post-processors are *optional*. If no post-processors are defined within a +template, then no post-processing will be done to the image. The resulting +artifact of a build is just the image outputted by the builder. This documentation page will cover how to configure a post-processor in a template. The specific configuration options available for each post-processor, -however, must be referenced from the documentation for that specific post-processor. 
+however, must be referenced from the documentation for that specific +post-processor. Within a template, a section of post-processor definitions looks like this: -```javascript +``` {.javascript} { "post-processors": [ // ... one or more post-processor definitions here @@ -38,29 +41,29 @@ apply to, if you wish. ## Post-Processor Definition -Within the `post-processors` array in a template, there are three ways to -define a post-processor. There are _simple_ definitions, _detailed_ definitions, -and _sequence_ definitions. Don't worry, they're all very easy to understand, -and the "simple" and "detailed" definitions are simply shortcuts for the -"sequence" definition. +Within the `post-processors` array in a template, there are three ways to define +a post-processor. There are *simple* definitions, *detailed* definitions, and +*sequence* definitions. Don't worry, they're all very easy to understand, and +the "simple" and "detailed" definitions are simply shortcuts for the "sequence" +definition. A **simple definition** is just a string; the name of the post-processor. An -example is shown below. Simple definitions are used when no additional configuration -is needed for the post-processor. +example is shown below. Simple definitions are used when no additional +configuration is needed for the post-processor. -```javascript +``` {.javascript} { "post-processors": ["compress"] } ``` -A **detailed definition** is a JSON object. It is very similar to a builder -or provisioner definition. It contains a `type` field to denote the type of -the post-processor, but may also contain additional configuration for the -post-processor. A detailed definition is used when additional configuration -is needed beyond simply the type for the post-processor. An example is shown below. +A **detailed definition** is a JSON object. It is very similar to a builder or +provisioner definition. It contains a `type` field to denote the type of the +post-processor, but may also contain additional configuration for the +post-processor. A detailed definition is used when additional configuration is +needed beyond simply the type for the post-processor. An example is shown below. -```javascript +``` {.javascript} { "post-processors": [ { @@ -72,14 +75,14 @@ is needed beyond simply the type for the post-processor. An example is shown bel ``` A **sequence definition** is a JSON array comprised of other **simple** or -**detailed** definitions. The post-processors defined in the array are run -in order, with the artifact of each feeding into the next, and any intermediary +**detailed** definitions. The post-processors defined in the array are run in +order, with the artifact of each feeding into the next, and any intermediary artifacts being discarded. A sequence definition may not contain another sequence definition. Sequence definitions are used to chain together multiple post-processors. An example is shown below, where the artifact of a build is compressed then uploaded, but the compressed result is not kept. -```javascript +``` {.javascript} { "post-processors": [ [ @@ -90,21 +93,21 @@ compressed then uploaded, but the compressed result is not kept. } ``` -As you may be able to imagine, the **simple** and **detailed** definitions -are simply shortcuts for a **sequence** definition of only one element. +As you may be able to imagine, the **simple** and **detailed** definitions are +simply shortcuts for a **sequence** definition of only one element. 
## Input Artifacts -When using post-processors, the input artifact (coming from a builder or -another post-processor) is discarded by default after the post-processor runs. -This is because generally, you don't want the intermediary artifacts on the -way to the final artifact created. +When using post-processors, the input artifact (coming from a builder or another +post-processor) is discarded by default after the post-processor runs. This is +because generally, you don't want the intermediary artifacts on the way to the +final artifact created. -In some cases, however, you may want to keep the intermediary artifacts. -You can tell Packer to keep these artifacts by setting the -`keep_input_artifact` configuration to `true`. An example is shown below: +In some cases, however, you may want to keep the intermediary artifacts. You can +tell Packer to keep these artifacts by setting the `keep_input_artifact` +configuration to `true`. An example is shown below: -```javascript +``` {.javascript} { "post-processors": [ { @@ -115,39 +118,37 @@ You can tell Packer to keep these artifacts by setting the } ``` -This setting will only keep the input artifact to _that specific_ -post-processor. If you're specifying a sequence of post-processors, then -all intermediaries are discarded by default except for the input artifacts -to post-processors that explicitly state to keep the input artifact. +This setting will only keep the input artifact to *that specific* +post-processor. If you're specifying a sequence of post-processors, then all +intermediaries are discarded by default except for the input artifacts to +post-processors that explicitly state to keep the input artifact. --> **Note:** The intuitive reader may be wondering what happens -if multiple post-processors are specified (not in a sequence). Does Packer require the -configuration to keep the input artifact on all the post-processors? -The answer is no, of course not. Packer is smart enough to figure out -that at least one post-processor requested that the input be kept, so it will keep -it around. +-> **Note:** The intuitive reader may be wondering what happens if multiple +post-processors are specified (not in a sequence). Does Packer require the +configuration to keep the input artifact on all the post-processors? The answer +is no, of course not. Packer is smart enough to figure out that at least one +post-processor requested that the input be kept, so it will keep it around. ## Run on Specific Builds -You can use the `only` or `except` configurations to run a post-processor -only with specific builds. These two configurations do what you expect: -`only` will only run the post-processor on the specified builds and -`except` will run the post-processor on anything other than the specified -builds. +You can use the `only` or `except` configurations to run a post-processor only +with specific builds. These two configurations do what you expect: `only` will +only run the post-processor on the specified builds and `except` will run the +post-processor on anything other than the specified builds. -An example of `only` being used is shown below, but the usage of `except` -is effectively the same. `only` and `except` can only be specified on "detailed" -configurations. If you have a sequence of post-processors to run, `only` -and `except` will only affect that single post-processor in the sequence. +An example of `only` being used is shown below, but the usage of `except` is +effectively the same. 
`only` and `except` can only be specified on "detailed" +configurations. If you have a sequence of post-processors to run, `only` and +`except` will only affect that single post-processor in the sequence. -```javascript +``` {.javascript} { "type": "vagrant", "only": ["virtualbox-iso"] } ``` -The values within `only` or `except` are _build names_, not builder -types. If you recall, build names by default are just their builder type, -but if you specify a custom `name` parameter, then you should use that -as the value instead of the type. +The values within `only` or `except` are *build names*, not builder types. If +you recall, build names by default are just their builder type, but if you +specify a custom `name` parameter, then you should use that as the value instead +of the type. diff --git a/website/source/docs/templates/provisioners.html.markdown b/website/source/docs/templates/provisioners.html.markdown index 00578bb86..9f4acc9cb 100644 --- a/website/source/docs/templates/provisioners.html.markdown +++ b/website/source/docs/templates/provisioners.html.markdown @@ -1,9 +1,11 @@ --- -layout: "docs" -page_title: "Templates: Provisioners" -description: |- - Within the template, the provisioners section contains an array of all the provisioners that Packer should use to install and configure software within running machines prior to turning them into machine images. ---- +description: | + Within the template, the provisioners section contains an array of all the + provisioners that Packer should use to install and configure software within + running machines prior to turning them into machine images. +layout: docs +page_title: 'Templates: Provisioners' +... # Templates: Provisioners @@ -11,19 +13,18 @@ Within the template, the provisioners section contains an array of all the provisioners that Packer should use to install and configure software within running machines prior to turning them into machine images. -Provisioners are _optional_. If no provisioners are defined within a template, -then no software other than the defaults will be installed within the -resulting machine images. This is not typical, however, since much of the -value of Packer is to produce multiple identical images -of pre-configured software. +Provisioners are *optional*. If no provisioners are defined within a template, +then no software other than the defaults will be installed within the resulting +machine images. This is not typical, however, since much of the value of Packer +is to produce multiple identical images of pre-configured software. This documentation page will cover how to configure a provisioner in a template. -The specific configuration options available for each provisioner, however, -must be referenced from the documentation for that specific provisioner. +The specific configuration options available for each provisioner, however, must +be referenced from the documentation for that specific provisioner. Within a template, a section of provisioner definitions looks like this: -```javascript +``` {.javascript} { "provisioners": [ // ... one or more provisioner definitions here @@ -31,25 +32,24 @@ Within a template, a section of provisioner definitions looks like this: } ``` -For each of the definitions, Packer will run the provisioner for each -of the configured builds. The provisioners will be run in the order -they are defined within the template. +For each of the definitions, Packer will run the provisioner for each of the +configured builds. 
The provisioners will be run in the order they are defined +within the template. ## Provisioner Definition -A provisioner definition is a JSON object that must contain at least -the `type` key. This key specifies the name of the provisioner to use. -Additional keys within the object are used to configure the provisioner, -with the exception of a handful of special keys, covered later. +A provisioner definition is a JSON object that must contain at least the `type` +key. This key specifies the name of the provisioner to use. Additional keys +within the object are used to configure the provisioner, with the exception of a +handful of special keys, covered later. -As an example, the "shell" provisioner requires a key such as `script` -which specifies a path to a shell script to execute within the machines -being created. +As an example, the "shell" provisioner requires a key such as `script` which +specifies a path to a shell script to execute within the machines being created. An example provisioner definition is shown below, configuring the shell provisioner to run a local script within the machines: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh" @@ -58,16 +58,15 @@ provisioner to run a local script within the machines: ## Run on Specific Builds -You can use the `only` or `except` configurations to run a provisioner -only with specific builds. These two configurations do what you expect: -`only` will only run the provisioner on the specified builds and -`except` will run the provisioner on anything other than the specified -builds. +You can use the `only` or `except` configurations to run a provisioner only with +specific builds. These two configurations do what you expect: `only` will only +run the provisioner on the specified builds and `except` will run the +provisioner on anything other than the specified builds. -An example of `only` being used is shown below, but the usage of `except` -is effectively the same: +An example of `only` being used is shown below, but the usage of `except` is +effectively the same: -```javascript +``` {.javascript} { "type": "shell", "script": "script.sh", @@ -75,21 +74,21 @@ is effectively the same: } ``` -The values within `only` or `except` are _build names_, not builder -types. If you recall, build names by default are just their builder type, -but if you specify a custom `name` parameter, then you should use that -as the value instead of the type. +The values within `only` or `except` are *build names*, not builder types. If +you recall, build names by default are just their builder type, but if you +specify a custom `name` parameter, then you should use that as the value instead +of the type. ## Build-Specific Overrides -While the goal of Packer is to produce identical machine images, it -sometimes requires periods of time where the machines are different before -they eventually converge to be identical. In these cases, different configurations -for provisioners may be necessary depending on the build. This can be done -using build-specific overrides. +While the goal of Packer is to produce identical machine images, it sometimes +requires periods of time where the machines are different before they eventually +converge to be identical. In these cases, different configurations for +provisioners may be necessary depending on the build. This can be done using +build-specific overrides. -An example of where this might be necessary is when building both an EC2 AMI -and a VMware machine. 
The source EC2 AMI may setup a user with administrative
+An example of where this might be necessary is when building both an EC2 AMI and
+a VMware machine. The source EC2 AMI may set up a user with administrative
 privileges by default, whereas the VMware machine doesn't have these privileges.
 In this case, the shell script may need to be executed differently. Of course,
 the goal is that hopefully the shell script converges these two images to be
@@ -97,7 +96,7 @@ identical. However, they may initially need to be run differently.

 This example is shown below:

-```javascript
+``` {.javascript}
 {
   "type": "shell",
   "script": "script.sh",
@@ -111,24 +110,23 @@ This example is shown below:
 ```

 As you can see, the `override` key is used. The value of this key is another
-JSON object where the key is the name of a [builder definition](/docs/templates/builders.html).
-The value of this is in turn another JSON object. This JSON object simply
-contains the provisioner configuration as normal. This configuration is merged
-into the default provisioner configuration.
+JSON object where the key is the name of a [builder
+definition](/docs/templates/builders.html). The value of this is in turn another
+JSON object. This JSON object simply contains the provisioner configuration as
+normal. This configuration is merged into the default provisioner configuration.

 ## Pausing Before Running

-With certain provisioners it is sometimes desirable to pause for some period
-of time before running it. Specifically, in cases where a provisioner reboots
-the machine, you may want to wait for some period of time before starting
-the next provisioner.
+With certain provisioners it is sometimes desirable to pause for some period of
+time before running them. Specifically, in cases where a provisioner reboots the
+machine, you may want to wait for some period of time before starting the next
+provisioner.

 Every provisioner definition in a Packer template can take a special
-configuration `pause_before` that is the amount of time to pause before
-running that provisioner. By default, there is no pause. An example
-is shown below:
+configuration `pause_before` that is the amount of time to pause before running
+that provisioner. By default, there is no pause. An example is shown below:

-```javascript
+``` {.javascript}
 {
   "type": "shell",
   "script": "script.sh",
@@ -136,5 +134,5 @@ is shown below:
 }
 ```

-For the above provisioner, Packer will wait 10 seconds before uploading
-and executing the shell script.
+For the above provisioner, Packer will wait 10 seconds before uploading and
+executing the shell script.
diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown
index 568b45ec1..3ca2c2de2 100644
--- a/website/source/docs/templates/push.html.markdown
+++ b/website/source/docs/templates/push.html.markdown
@@ -1,19 +1,19 @@
 ---
-layout: "docs"
-page_title: "Templates: Push"
-description: |-
-  Within the template, the push section configures how a template can be
-  pushed to a remote build service.
----
+description: |
+    Within the template, the push section configures how a template can be pushed to
+    a remote build service.
+layout: docs
+page_title: 'Templates: Push'
+...

 # Templates: Push

 Within the template, the push section configures how a template can be
 [pushed](/docs/command-line/push.html) to a remote build service.
-Push configuration is responsible for defining what files are required -to build this template, what the name of build configuration is in the -build service, etc. +Push configuration is responsible for defining what files are required to build +this template, what the name of build configuration is in the build service, +etc. The only build service that Packer can currently push to is [Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build @@ -21,7 +21,7 @@ services will come in the form of plugins in the future. Within a template, a push configuration section looks like this: -```javascript +``` {.javascript} { "push": { // ... push configuration here @@ -37,37 +37,37 @@ each category, the available configuration keys are alphabetized. ### Required -* `name` (string) - Name of the build configuration in the build service. - If this doesn't exist, it will be created (by default). +- `name` (string) - Name of the build configuration in the build service. If + this doesn't exist, it will be created (by default). ### Optional -* `address` (string) - The address of the build service to use. By default - this is `https://atlas.hashicorp.com`. +- `address` (string) - The address of the build service to use. By default this + is `https://atlas.hashicorp.com`. -* `base_dir` (string) - The base directory of the files to upload. This - will be the current working directory when the build service executes your - template. This path is relative to the template. +- `base_dir` (string) - The base directory of the files to upload. This will be + the current working directory when the build service executes your template. + This path is relative to the template. -* `include` (array of strings) - Glob patterns to include relative to - the `base_dir`. If this is specified, only files that match the include - pattern are included. +- `include` (array of strings) - Glob patterns to include relative to the + `base_dir`. If this is specified, only files that match the include pattern + are included. -* `exclude` (array of strings) - Glob patterns to exclude relative to - the `base_dir`. +- `exclude` (array of strings) - Glob patterns to exclude relative to the + `base_dir`. -* `token` (string) - An access token to use to authenticate to the build - service. +- `token` (string) - An access token to use to authenticate to the + build service. -* `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) - and only upload the files that are tracked by the VCS. This is useful - for automatically excluding ignored files. This defaults to false. +- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and + only upload the files that are tracked by the VCS. This is useful for + automatically excluding ignored files. This defaults to false. 
## Examples A push configuration section with minimal options: -```javascript +``` {.javascript} { "push": { "name": "hashicorp/precise64" @@ -78,7 +78,7 @@ A push configuration section with minimal options: A push configuration specifying Packer to inspect the VCS and list individual files to include: -```javascript +``` {.javascript} { "push": { "name": "hashicorp/precise64", diff --git a/website/source/docs/templates/user-variables.html.markdown b/website/source/docs/templates/user-variables.html.markdown index d80662dea..30c9555bf 100644 --- a/website/source/docs/templates/user-variables.html.markdown +++ b/website/source/docs/templates/user-variables.html.markdown @@ -1,35 +1,38 @@ --- -layout: "docs" -page_title: "User Variables in Templates" -description: |- - User variables allow your templates to be further configured with variables from the command-line, environmental variables, or files. This lets you parameterize your templates so that you can keep secret tokens, environment-specific data, and other types of information out of your templates. This maximizes the portability and shareability of the template. ---- +description: | + User variables allow your templates to be further configured with variables from + the command-line, environmental variables, or files. This lets you parameterize + your templates so that you can keep secret tokens, environment-specific data, + and other types of information out of your templates. This maximizes the + portability and shareability of the template. +layout: docs +page_title: User Variables in Templates +... # User Variables -User variables allow your templates to be further configured with variables -from the command-line, environmental variables, or files. This lets you -parameterize your templates so that you can keep secret tokens, -environment-specific data, and other types of information out of your -templates. This maximizes the portability and shareability of the template. +User variables allow your templates to be further configured with variables from +the command-line, environmental variables, or files. This lets you parameterize +your templates so that you can keep secret tokens, environment-specific data, +and other types of information out of your templates. This maximizes the +portability and shareability of the template. -Using user variables expects you know how -[configuration templates](/docs/templates/configuration-templates.html) work. -If you don't know how configuration templates work yet, please read that -page first. +Using user variables expects you know how [configuration +templates](/docs/templates/configuration-templates.html) work. If you don't know +how configuration templates work yet, please read that page first. ## Usage User variables must first be defined in a `variables` section within your -template. Even if you want a variable to default to an empty string, it -must be defined. This explicitness makes it easy for newcomers to your -template to understand what can be modified using variables in your template. +template. Even if you want a variable to default to an empty string, it must be +defined. This explicitness makes it easy for newcomers to your template to +understand what can be modified using variables in your template. -The `variables` section is a simple key/value mapping of the variable -name to a default value. A default value can be the empty string. An -example is shown below: +The `variables` section is a simple key/value mapping of the variable name to a +default value. 
A default value can be the empty string. An example is shown +below: -```javascript +``` {.javascript} { "variables": { "aws_access_key": "", @@ -46,28 +49,27 @@ example is shown below: ``` In the above example, the template defines two variables: `aws_access_key` and -`aws_secret_key`. They default to empty values. -Later, the variables are used within the builder we defined in order to -configure the actual keys for the Amazon builder. +`aws_secret_key`. They default to empty values. Later, the variables are used +within the builder we defined in order to configure the actual keys for the +Amazon builder. -If the default value is `null`, then the user variable will be _required_. -This means that the user must specify a value for this variable or template +If the default value is `null`, then the user variable will be *required*. This +means that the user must specify a value for this variable or template validation will fail. -Using the variables is extremely easy. Variables are used by calling -the user function in the form of {{user `variable`}}. -This function can be used in _any value_ within the template, in -builders, provisioners, _anything_. The user variable is available globally -within the template. +Using the variables is extremely easy. Variables are used by calling the user +function in the form of {{user \`variable\`}}. This function can be +used in *any value* within the template, in builders, provisioners, *anything*. +The user variable is available globally within the template. ## Environmental Variables -Environmental variables can be used within your template using user -variables. The `env` function is available _only_ within the default value -of a user variable, allowing you to default a user variable to an -environmental variable. An example is shown below: +Environmental variables can be used within your template using user variables. +The `env` function is available *only* within the default value of a user +variable, allowing you to default a user variable to an environmental variable. +An example is shown below: -```javascript +``` {.javascript} { "variables": { "my_secret": "{{env `MY_SECRET`}}", @@ -77,73 +79,69 @@ environmental variable. An example is shown below: } ``` -This will default "my\_secret" to be the value of the "MY\_SECRET" -environmental variable (or the empty string if it does not exist). +This will default "my\_secret" to be the value of the "MY\_SECRET" environmental +variable (or the empty string if it does not exist). --> **Why can't I use environmental variables elsewhere?** -User variables are the single source of configurable input to a template. -We felt that having environmental variables used _anywhere_ in a -template would confuse the user about the possible inputs to a template. -By allowing environmental variables only within default values for user -variables, user variables remain as the single source of input to a template -that a user can easily discover using `packer inspect`. +-> **Why can't I use environmental variables elsewhere?** User variables are +the single source of configurable input to a template. We felt that having +environmental variables used *anywhere* in a template would confuse the user +about the possible inputs to a template. By allowing environmental variables +only within default values for user variables, user variables remain as the +single source of input to a template that a user can easily discover using +`packer inspect`. 
## Setting Variables

-Now that we covered how to define and use variables within a template,
-the next important point is how to actually set these variables. Packer
-exposes two methods for setting variables: from the command line or
-from a file.
+Now that we covered how to define and use variables within a template, the next
+important point is how to actually set these variables. Packer exposes two
+methods for setting variables: from the command line or from a file.

### From the Command Line

-To set variables from the command line, the `-var` flag is used as
-a parameter to `packer build` (and some other commands). Continuing our example
-above, we could build our template using the command below. The command
-is split across multiple lines for readability, but can of course be a single
-line.
+To set variables from the command line, the `-var` flag is used as a parameter
+to `packer build` (and some other commands). Continuing our example above, we
+could build our template using the command below. The command is split across
+multiple lines for readability, but can of course be a single line.

-```text
+``` {.text}
$ packer build \
    -var 'aws_access_key=foo' \
    -var 'aws_secret_key=bar' \
    template.json
```

-As you can see, the `-var` flag can be specified multiple times in order
-to set multiple variables. Also, variables set later on the command-line
-override earlier set variables if it has already been set.
+As you can see, the `-var` flag can be specified multiple times in order to set
+multiple variables. Also, variables set later on the command-line override
+variables set earlier.

-Finally, variables set from the command-line override all other methods
-of setting variables. So if you specify a variable in a file (the next
-method shown), you can override it using the command-line.
+Finally, variables set from the command-line override all other methods of
+setting variables. So if you specify a variable in a file (the next method
+shown), you can override it using the command-line.

### From a File

-Variables can also be set from an external JSON file. The `-var-file`
-flag reads a file containing a basic key/value mapping of variables to
-values and sets those variables. The JSON file is simple:
+Variables can also be set from an external JSON file. The `-var-file` flag reads
+a file containing a basic key/value mapping of variables to values and sets
+those variables. The JSON file is simple:

-```javascript
+``` {.javascript}
{
  "aws_access_key": "foo",
  "aws_secret_key": "bar"
}
```

-It is a single JSON object where the keys are variables and the values are
-the variable values. Assuming this file is in `variables.json`, we can
-build our template using the following command:
+It is a single JSON object where the keys are variables and the values are the
+variable values. Assuming this file is in `variables.json`, we can build our
+template using the following command:

-```text
+``` {.text}
$ packer build -var-file=variables.json template.json
```

-The `-var-file` flag can be specified multiple times and variables from
-multiple files will be read and applied. As you'd expect, variables read
-from files specified later override a variable set earlier if it has
-already been set.
+The `-var-file` flag can be specified multiple times and variables from multiple
+files will be read and applied. As you'd expect, variables read from files
+specified later override variables set earlier.
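+
+As a hypothetical illustration of this layering (the file names below are
+invented for this example), values in `production.json` would override any
+duplicates from `base.json`, and the `-var` flag would override both:
+
+``` {.text}
+$ packer build \
+    -var-file=base.json \
+    -var-file=production.json \
+    -var 'aws_access_key=foo' \
+    template.json
+```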
-And as mentioned above, no matter where a `-var-file` is specified, a -`-var` flag on the command line will always override any variables from -a file. +And as mentioned above, no matter where a `-var-file` is specified, a `-var` +flag on the command line will always override any variables from a file. diff --git a/website/source/docs/templates/veewee-to-packer.html.markdown b/website/source/docs/templates/veewee-to-packer.html.markdown index 81a06de71..ecc257f14 100644 --- a/website/source/docs/templates/veewee-to-packer.html.markdown +++ b/website/source/docs/templates/veewee-to-packer.html.markdown @@ -1,35 +1,39 @@ --- -layout: "docs" -page_title: "Convert Veewee Definitions to Packer Templates" -description: |- - If you are or were a user of Veewee, then there is an official tool called veewee-to-packer that will convert your Veewee definition into an equivalent Packer template. Even if you're not a Veewee user, Veewee has a large library of templates that can be readily used with Packer by simply converting them. ---- +description: | + If you are or were a user of Veewee, then there is an official tool called + veewee-to-packer that will convert your Veewee definition into an equivalent + Packer template. Even if you're not a Veewee user, Veewee has a large library of + templates that can be readily used with Packer by simply converting them. +layout: docs +page_title: Convert Veewee Definitions to Packer Templates +... # Veewee-to-Packer -If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), -then there is an official tool called [veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) -that will convert your Veewee definition into an equivalent Packer template. -Even if you're not a Veewee user, Veewee has a -[large library](https://github.com/jedi4ever/veewee/tree/master/templates) -of templates that can be readily used with Packer by simply converting them. +If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), then +there is an official tool called +[veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) that will +convert your Veewee definition into an equivalent Packer template. Even if +you're not a Veewee user, Veewee has a [large +library](https://github.com/jedi4ever/veewee/tree/master/templates) of templates +that can be readily used with Packer by simply converting them. ## Installation and Usage Since Veewee itself is a Ruby project, so too is the veewee-to-packer -application so that it can read the Veewee configurations. Install it using RubyGems: +application so that it can read the Veewee configurations. Install it using +RubyGems: -```text +``` {.text} $ gem install veewee-to-packer ... ``` -Once installed, usage is easy! Just point `veewee-to-packer` -at the `definition.rb` file of any template. The converter will output -any warnings or messages about the conversion. The example below converts -a CentOS template: +Once installed, usage is easy! Just point `veewee-to-packer` at the +`definition.rb` file of any template. The converter will output any warnings or +messages about the conversion. The example below converts a CentOS template: -```text +``` {.text} $ veewee-to-packer templates/CentOS-6.4/definition.rb Success! Your Veewee definition was converted to a Packer template! The template can be found in the `template.json` file @@ -41,22 +45,21 @@ first, since the template has relative paths that expect you to use it from the same working directory. 
``` -***Voila!*** By default, `veewee-to-packer` will output a template that -contains a builder for both VirtualBox and VMware. You can use the -`-only` flag on `packer build` to only build one of them. Otherwise -you can use the `--builder` flag on `veewee-to-packer` to only output -specific builder configurations. +***Voila!*** By default, `veewee-to-packer` will output a template that contains +a builder for both VirtualBox and VMware. You can use the `-only` flag on +`packer build` to only build one of them. Otherwise you can use the `--builder` +flag on `veewee-to-packer` to only output specific builder configurations. ## Limitations -None, really. The tool will tell you if it can't convert a part of a -template, and whether that is a critical error or just a warning. -Most of Veewee's functions translate perfectly over to Packer. There are -still a couple missing features in Packer, but they're minimal. +None, really. The tool will tell you if it can't convert a part of a template, +and whether that is a critical error or just a warning. Most of Veewee's +functions translate perfectly over to Packer. There are still a couple missing +features in Packer, but they're minimal. ## Bugs -If you find any bugs, please report them to the -[veewee-to-packer issue tracker](https://github.com/mitchellh/veewee-to-packer). -I haven't been able to exhaustively test every Veewee template, so there -are certainly some edge cases out there. +If you find any bugs, please report them to the [veewee-to-packer issue +tracker](https://github.com/mitchellh/veewee-to-packer). I haven't been able to +exhaustively test every Veewee template, so there are certainly some edge cases +out there. diff --git a/website/source/intro/getting-started/build-image.html.markdown b/website/source/intro/getting-started/build-image.html.markdown index 4bf8eda57..ec1d851a9 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -1,29 +1,32 @@ --- -layout: "intro" -page_title: "Build an Image" -prev_url: "/intro/getting-started/setup.html" -next_url: "/intro/getting-started/provision.html" -next_title: "Provision" -description: |- - With Packer installed, let's just dive right into it and build our first image. Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just an example. Packer can create images for many platforms with anything pre-installed. ---- +description: | + With Packer installed, let's just dive right into it and build our first image. + Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just + an example. Packer can create images for many platforms with anything + pre-installed. +layout: intro +next_title: Provision +next_url: '/intro/getting-started/provision.html' +page_title: Build an Image +prev_url: '/intro/getting-started/setup.html' +... # Build an Image -With Packer installed, let's just dive right into it and build our first -image. Our first image will be an [Amazon EC2 AMI](http://aws.amazon.com/ec2/) -with Redis pre-installed. This is just an example. Packer can create images -for [many platforms](/intro/platforms.html) with anything pre-installed. +With Packer installed, let's just dive right into it and build our first image. +Our first image will be an [Amazon EC2 AMI](http://aws.amazon.com/ec2/) with +Redis pre-installed. This is just an example. Packer can create images for [many +platforms](/intro/platforms.html) with anything pre-installed. 
If you don't have an AWS account, [create one now](http://aws.amazon.com/free/). For the example, we'll use a "t2.micro" instance to build our image, which -qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning -it will be free. If you already have an AWS account, you may be charged some -amount of money, but it shouldn't be more than a few cents. +qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning it +will be free. If you already have an AWS account, you may be charged some amount +of money, but it shouldn't be more than a few cents. --> **Note:** If you're not using an account that qualifies under the AWS -free-tier, you may be charged to run these examples. The charge should only be -a few cents, but we're not responsible if it ends up being more. +-> **Note:** If you're not using an account that qualifies under the AWS +free-tier, you may be charged to run these examples. The charge should only be a +few cents, but we're not responsible if it ends up being more. Packer can build images for [many platforms](/intro/platforms.html) other than AWS, but AWS requires no additional software installed on your computer and @@ -34,16 +37,16 @@ apply to the other platforms as well. ## The Template -The configuration file used to define what image we want built and how -is called a _template_ in Packer terminology. The format of a template -is simple [JSON](http://www.json.org/). JSON struck the best balance between +The configuration file used to define what image we want built and how is called +a *template* in Packer terminology. The format of a template is simple +[JSON](http://www.json.org/). JSON struck the best balance between human-editable and machine-editable, allowing both hand-made templates as well as machine generated templates to easily be made. We'll start by creating the entire template, then we'll go over each section briefly. Create a file `example.json` and fill it with the following contents: -```javascript +``` {.javascript} { "variables": { "aws_access_key": "", @@ -62,55 +65,55 @@ briefly. Create a file `example.json` and fill it with the following contents: } ``` -When building, you'll pass in the `aws_access_key` and `aws_secret_key` as -a [user variable](/docs/templates/user-variables.html), keeping your secret -keys out of the template. You can create security credentials -on [this page](https://console.aws.amazon.com/iam/home?#security_credential). -An example IAM policy document can be found in the [Amazon EC2 builder docs](/docs/builders/amazon.html). +When building, you'll pass in the `aws_access_key` and `aws_secret_key` as a +[user variable](/docs/templates/user-variables.html), keeping your secret keys +out of the template. You can create security credentials on [this +page](https://console.aws.amazon.com/iam/home?#security_credential). An example +IAM policy document can be found in the [Amazon EC2 builder +docs](/docs/builders/amazon.html). -This is a basic template that is ready-to-go. It should be immediately recognizable -as a normal, basic JSON object. Within the object, the `builders` section -contains an array of JSON objects configuring a specific _builder_. A -builder is a component of Packer that is responsible for creating a machine -and turning that machine into an image. +This is a basic template that is ready-to-go. It should be immediately +recognizable as a normal, basic JSON object. Within the object, the `builders` +section contains an array of JSON objects configuring a specific *builder*. 
A +builder is a component of Packer that is responsible for creating a machine and +turning that machine into an image. -In this case, we're only configuring a single builder of type `amazon-ebs`. -This is the Amazon EC2 AMI builder that ships with Packer. This builder -builds an EBS-backed AMI by launching a source AMI, provisioning on top of -that, and re-packaging it into a new AMI. +In this case, we're only configuring a single builder of type `amazon-ebs`. This +is the Amazon EC2 AMI builder that ships with Packer. This builder builds an +EBS-backed AMI by launching a source AMI, provisioning on top of that, and +re-packaging it into a new AMI. -The additional keys within the object are configuration for this builder, specifying things -such as access keys, the source AMI to build from, and more. -The exact set of configuration variables available for a builder are -specific to each builder and can be found within the [documentation](/docs). +The additional keys within the object are configuration for this builder, +specifying things such as access keys, the source AMI to build from, and more. +The exact set of configuration variables available for a builder are specific to +each builder and can be found within the [documentation](/docs). -Before we take this template and build an image from it, let's validate the template -by running `packer validate example.json`. This command checks the syntax -as well as the configuration values to verify they look valid. The output should -look similar to below, because the template should be valid. If there are +Before we take this template and build an image from it, let's validate the +template by running `packer validate example.json`. This command checks the +syntax as well as the configuration values to verify they look valid. The output +should look similar to below, because the template should be valid. If there are any errors, this command will tell you. -```text +``` {.text} $ packer validate example.json Template validated successfully. ``` Next, let's build the image from this template. -An astute reader may notice that we said earlier we'd be building an -image with Redis pre-installed, and yet the template we made doesn't reference -Redis anywhere. In fact, this part of the documentation will only cover making -a first basic, non-provisioned image. The next section on provisioning will -cover installing Redis. +An astute reader may notice that we said earlier we'd be building an image with +Redis pre-installed, and yet the template we made doesn't reference Redis +anywhere. In fact, this part of the documentation will only cover making a first +basic, non-provisioned image. The next section on provisioning will cover +installing Redis. ## Your First Image -With a properly validated template. It is time to build your first image. -This is done by calling `packer build` with the template file. The output -should look similar to below. Note that this process typically takes a -few minutes. +With a properly validated template. It is time to build your first image. This +is done by calling `packer build` with the template file. The output should look +similar to below. Note that this process typically takes a few minutes. -```text +``` {.text} $ packer build \ -var 'aws_access_key=YOUR ACCESS KEY' \ -var 'aws_secret_key=YOUR SECRET KEY' \ @@ -139,38 +142,36 @@ $ packer build \ us-east-1: ami-19601070 ``` -At the end of running `packer build`, Packer outputs the _artifacts_ -that were created as part of the build. 
Artifacts are the results of a -build, and typically represent an ID (such as in the case of an AMI) or -a set of files (such as for a VMware virtual machine). In this example, -we only have a single artifact: the AMI in us-east-1 that was created. +At the end of running `packer build`, Packer outputs the *artifacts* that were +created as part of the build. Artifacts are the results of a build, and +typically represent an ID (such as in the case of an AMI) or a set of files +(such as for a VMware virtual machine). In this example, we only have a single +artifact: the AMI in us-east-1 that was created. -This AMI is ready to use. If you wanted you can go and launch this AMI -right now and it would work great. +This AMI is ready to use. If you wanted you can go and launch this AMI right now +and it would work great. --> **Note:** Your AMI ID will surely be different than the -one above. If you try to launch the one in the example output above, you -will get an error. If you want to try to launch your AMI, get the ID from -the Packer output. +-> **Note:** Your AMI ID will surely be different than the one above. If you +try to launch the one in the example output above, you will get an error. If you +want to try to launch your AMI, get the ID from the Packer output. ## Managing the Image -Packer only builds images. It does not attempt to manage them in any way. -After they're built, it is up to you to launch or destroy them as you see -fit. If you want to store and namespace images for easy reference, you -can use [Atlas by HashiCorp](https://atlas.hashicorp.com). We'll cover -remotely building and storing images at the end of this getting started guide. +Packer only builds images. It does not attempt to manage them in any way. After +they're built, it is up to you to launch or destroy them as you see fit. If you +want to store and namespace images for easy reference, you can use [Atlas by +HashiCorp](https://atlas.hashicorp.com). We'll cover remotely building and +storing images at the end of this getting started guide. -After running the above example, your AWS account -now has an AMI associated with it. AMIs are stored in S3 by Amazon, -so unless you want to be charged about $0.01 -per month, you'll probably want to remove it. Remove the AMI by -first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). -Next, delete the associated snapshot on the -[AWS snapshot management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Snapshots). +After running the above example, your AWS account now has an AMI associated with +it. AMIs are stored in S3 by Amazon, so unless you want to be charged about +\$0.01 per month, you'll probably want to remove it. Remove the AMI by first +deregistering it on the [AWS AMI management +page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). Next, +delete the associated snapshot on the [AWS snapshot management +page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Snapshots). -Congratulations! You've just built your first image with Packer. Although -the image was pretty useless in this case (nothing was changed about it), -this page should've given you a general idea of how Packer works, what -templates are, and how to validate and build templates into machine -images. +Congratulations! You've just built your first image with Packer. 
Although the +image was pretty useless in this case (nothing was changed about it), this page +should've given you a general idea of how Packer works, what templates are, and +how to validate and build templates into machine images. diff --git a/website/source/intro/getting-started/next.html.markdown b/website/source/intro/getting-started/next.html.markdown index 262b84bb9..e1e7cc2ae 100644 --- a/website/source/intro/getting-started/next.html.markdown +++ b/website/source/intro/getting-started/next.html.markdown @@ -1,25 +1,29 @@ --- -layout: "intro" -page_title: "Next Steps" -description: |- - That concludes the getting started guide for Packer. You should now be comfortable with basic Packer usage, should understand templates, defining builds, provisioners, etc. At this point you're ready to begin playing with and using Packer in real scenarios. ---- +description: | + That concludes the getting started guide for Packer. You should now be + comfortable with basic Packer usage, should understand templates, defining + builds, provisioners, etc. At this point you're ready to begin playing with and + using Packer in real scenarios. +layout: intro +page_title: Next Steps +... # Next Steps -That concludes the getting started guide for Packer. You should now be comfortable -with basic Packer usage, should understand templates, defining builds, provisioners, -etc. At this point you're ready to begin playing with and using Packer -in real scenarios. +That concludes the getting started guide for Packer. You should now be +comfortable with basic Packer usage, should understand templates, defining +builds, provisioners, etc. At this point you're ready to begin playing with and +using Packer in real scenarios. -From this point forward, the most important reference for you will be -the [documentation](/docs). The documentation is less of a guide and -more of a reference of all the overall features and options of Packer. +From this point forward, the most important reference for you will be the +[documentation](/docs). The documentation is less of a guide and more of a +reference of all the overall features and options of Packer. -If you're interested in learning more about how Packer fits into the -HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/intro/getting-started). +If you're interested in learning more about how Packer fits into the HashiCorp +ecosystem of tools, read our [Atlas getting started +overview](https://atlas.hashicorp.com/help/intro/getting-started). -As you use Packer more, please voice your comments and concerns on -the [mailing list or IRC](/community). Additionally, Packer is -[open source](https://github.com/mitchellh/packer) so please contribute -if you'd like to. Contributions are very welcome. +As you use Packer more, please voice your comments and concerns on the [mailing +list or IRC](/community). Additionally, Packer is [open +source](https://github.com/mitchellh/packer) so please contribute if you'd like +to. Contributions are very welcome. 
diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 90554dacc..626033ef2 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -1,57 +1,59 @@ --- -layout: "intro" -page_title: "Parallel Builds" -prev_url: "/intro/getting-started/provision.html" -next_url: "/intro/getting-started/vagrant.html" -next_title: "Vagrant Boxes" -description: |- - So far we've shown how Packer can automatically build an image and provision it. This on its own is already quite powerful. But Packer can do better than that. Packer can create multiple images for multiple platforms in parallel, all configured from a single template. ---- +description: | + So far we've shown how Packer can automatically build an image and provision it. + This on its own is already quite powerful. But Packer can do better than that. + Packer can create multiple images for multiple platforms in parallel, all + configured from a single template. +layout: intro +next_title: Vagrant Boxes +next_url: '/intro/getting-started/vagrant.html' +page_title: Parallel Builds +prev_url: '/intro/getting-started/provision.html' +... # Parallel Builds So far we've shown how Packer can automatically build an image and provision it. This on its own is already quite powerful. But Packer can do better than that. -Packer can create multiple images for multiple platforms _in parallel_, all +Packer can create multiple images for multiple platforms *in parallel*, all configured from a single template. -This is a very useful and important feature of Packer. As an example, -Packer is able to make an AMI and a VMware virtual machine -in parallel provisioned with the _same scripts_, resulting in near-identical -images. The AMI can be used for production, the VMware machine can be used -for development. Or, another example, if you're using Packer to build -[software appliances](http://en.wikipedia.org/wiki/Software_appliance), -then you can build the appliance for every supported platform all in -parallel, all configured from a single template. +This is a very useful and important feature of Packer. As an example, Packer is +able to make an AMI and a VMware virtual machine in parallel provisioned with +the *same scripts*, resulting in near-identical images. The AMI can be used for +production, the VMware machine can be used for development. Or, another example, +if you're using Packer to build [software +appliances](http://en.wikipedia.org/wiki/Software_appliance), then you can build +the appliance for every supported platform all in parallel, all configured from +a single template. -Once you start taking advantage of this feature, the possibilities begin -to unfold in front of you. +Once you start taking advantage of this feature, the possibilities begin to +unfold in front of you. -Continuing on the example in this getting started guide, we'll build -a [DigitalOcean](http://www.digitalocean.com) image as well as an AMI. Both -will be near-identical: bare bones Ubuntu OS with Redis pre-installed. -However, since we're building for both platforms, you have the option of -whether you want to use the AMI, or the DigitalOcean snapshot. Or use both. +Continuing on the example in this getting started guide, we'll build a +[DigitalOcean](http://www.digitalocean.com) image as well as an AMI. Both will +be near-identical: bare bones Ubuntu OS with Redis pre-installed. 
However, since
+we're building for both platforms, you have the option of whether you want to
+use the AMI, or the DigitalOcean snapshot. Or use both.

 ## Setting Up DigitalOcean

-[DigitalOcean](https://www.digitalocean.com/) is a relatively new, but
-very popular VPS provider that has popped up. They have a quality offering
-of high performance, low cost VPS servers. We'll be building a DigitalOcean
-snapshot for this example.
+[DigitalOcean](https://www.digitalocean.com/) is a relatively new but very
+popular VPS provider. They have a quality offering of high performance, low
+cost VPS servers. We'll be building a DigitalOcean snapshot for this example.

-In order to do this, you'll need an account with DigitalOcean.
-[Sign up for an account now](https://www.digitalocean.com/). It is free
-to sign up. Because the "droplets" (servers) are charged hourly, you
-_will_ be charged $0.01 for every image you create with Packer. If
-you're not okay with this, just follow along.
+In order to do this, you'll need an account with DigitalOcean. [Sign up for an
+account now](https://www.digitalocean.com/). It is free to sign up. Because the
+"droplets" (servers) are charged hourly, you *will* be charged \$0.01 for every
+image you create with Packer. If you're not okay with this, just follow along.

-!> **Warning!** You _will_ be charged $0.01 by DigitalOcean per image
+!> **Warning!** You *will* be charged \$0.01 by DigitalOcean per image
 created with Packer because of the time the "droplet" is running.

-Once you sign up for an account, grab your API token from
-the [DigitalOcean API access page](https://cloud.digitalocean.com/settings/applications).
-Save these values somewhere; you'll need them in a second.
+Once you sign up for an account, grab your API token from the [DigitalOcean API
+access page](https://cloud.digitalocean.com/settings/applications). Save these
+values somewhere; you'll need them in a second.

 ## Modifying the Template

@@ -59,20 +61,20 @@ We now have to modify the template to add DigitalOcean to it. Modify the
 template we've been using and add the following JSON object to the `builders`
 array.

-```javascript
+``` {.javascript}
 {
   "type": "digitalocean",
   "api_token": "{{user `do_api_token`}}",
-  "image": "ubuntu-14-04-x64",
-  "region": "nyc3",
-  "size": "512mb",
+  "image": "ubuntu-14-04-x64",
+  "region": "nyc3",
+  "size": "512mb"
 }
 ```

-You'll also need to modify the `variables` section of the template
-to include the access keys for DigitalOcean.
+You'll also need to modify the `variables` section of the template to include
+the access keys for DigitalOcean.

-```javascript
+``` {.javascript}
 "variables": {
   "do_api_token": "",
   // ...
@@ -81,61 +83,61 @@
The entire template should now look like this: -```javascript +``` {.javascript} { - "variables": { - "aws_access_key": "", - "aws_secret_key": "", - "do_api_token": "" - }, - "builders": [{ - "type": "amazon-ebs", - "access_key": "{{user `aws_access_key`}}", - "secret_key": "{{user `aws_secret_key`}}", - "region": "us-east-1", - "source_ami": "ami-de0d9eb7", - "instance_type": "t1.micro", - "ssh_username": "ubuntu", - "ami_name": "packer-example {{timestamp}}" - },{ - "type": "digitalocean", - "api_token": "{{user `do_api_token`}}", - "image": "ubuntu-14-04-x64", - "region": "nyc3", - "size": "512mb" - }], - "provisioners": [{ - "type": "shell", - "inline": [ - "sleep 30", - "sudo apt-get update", - "sudo apt-get install -y redis-server" - ] - }] + "variables": { + "aws_access_key": "", + "aws_secret_key": "", + "do_api_token": "" + }, + "builders": [{ + "type": "amazon-ebs", + "access_key": "{{user `aws_access_key`}}", + "secret_key": "{{user `aws_secret_key`}}", + "region": "us-east-1", + "source_ami": "ami-de0d9eb7", + "instance_type": "t1.micro", + "ssh_username": "ubuntu", + "ami_name": "packer-example {{timestamp}}" + },{ + "type": "digitalocean", + "api_token": "{{user `do_api_token`}}", + "image": "ubuntu-14-04-x64", + "region": "nyc3", + "size": "512mb" + }], + "provisioners": [{ + "type": "shell", + "inline": [ + "sleep 30", + "sudo apt-get update", + "sudo apt-get install -y redis-server" + ] + }] } ``` Additional builders are simply added to the `builders` array in the template. -This tells Packer to build multiple images. The builder `type` values don't -even need to be different! In fact, if you wanted to build multiple AMIs, -you can do that as long as you specify a unique `name` for each build. +This tells Packer to build multiple images. The builder `type` values don't even +need to be different! In fact, if you wanted to build multiple AMIs, you can do +that as long as you specify a unique `name` for each build. Validate the template with `packer validate`. This is always a good practice. --> **Note:** If you're looking for more **DigitalOcean configuration options**, -you can find them on the -[DigitalOcean Builder page](/docs/builders/digitalocean.html) in the -documentation. The documentation is more of a reference manual that contains a -listing of all the available configuration options. +-> **Note:** If you're looking for more **DigitalOcean configuration +options**, you can find them on the [DigitalOcean Builder +page](/docs/builders/digitalocean.html) in the documentation. The documentation +is more of a reference manual that contains a listing of all the available +configuration options. ## Build -Now run `packer build` with your user variables. The output is too verbose to include -all of it, but a portion of it is reproduced below. Note that the ordering -and wording of the lines may be slightly different, but the effect is the -same. +Now run `packer build` with your user variables. The output is too verbose to +include all of it, but a portion of it is reproduced below. Note that the +ordering and wording of the lines may be slightly different, but the effect is +the same. -```text +``` {.text} $ packer build \ -var 'aws_access_key=YOUR ACCESS KEY' \ -var 'aws_secret_key=YOUR SECRET KEY' \ @@ -162,10 +164,10 @@ us-east-1: ami-376d1d5e --> digitalocean: A snapshot was created: packer-1371870364 ``` -As you can see, Packer builds both the Amazon and DigitalOcean images -in parallel. 
It outputs information about each in different colors -(although you can't see that in the block above) so that it is easy to identify. +As you can see, Packer builds both the Amazon and DigitalOcean images in +parallel. It outputs information about each in different colors (although you +can't see that in the block above) so that it is easy to identify. -At the end of the build, Packer outputs both of the artifacts created -(an AMI and a DigitalOcean snapshot). Both images created are bare bones -Ubuntu installations with Redis pre-installed. +At the end of the build, Packer outputs both of the artifacts created (an AMI +and a DigitalOcean snapshot). Both images created are bare bones Ubuntu +installations with Redis pre-installed. diff --git a/website/source/intro/getting-started/provision.html.markdown b/website/source/intro/getting-started/provision.html.markdown index bedb63b69..eda1f0346 100644 --- a/website/source/intro/getting-started/provision.html.markdown +++ b/website/source/intro/getting-started/provision.html.markdown @@ -1,43 +1,45 @@ --- -layout: "intro" -page_title: "Provision" -prev_url: "/intro/getting-started/build-image.html" -next_url: "/intro/getting-started/parallel-builds.html" -next_title: "Parallel Builds" -description: |- - In the previous page of this guide, you created your first image with Packer. The image you just built, however, was basically just a repackaging of a previously existing base AMI. The real utility of Packer comes from being able to install and configure software into the images as well. This stage is also known as the _provision_ step. Packer fully supports automated provisioning in order to install software onto the machines prior to turning them into images. ---- +description: | + In the previous page of this guide, you created your first image with Packer. + The image you just built, however, was basically just a repackaging of a + previously existing base AMI. The real utility of Packer comes from being able + to install and configure software into the images as well. This stage is also + known as the *provision* step. Packer fully supports automated provisioning in + order to install software onto the machines prior to turning them into images. +layout: intro +next_title: Parallel Builds +next_url: '/intro/getting-started/parallel-builds.html' +page_title: Provision +prev_url: '/intro/getting-started/build-image.html' +... # Provision -In the previous page of this guide, you created your first image with -Packer. The image you just built, however, was basically just a repackaging -of a previously existing base AMI. The real utility of Packer comes from -being able to install and configure software into the images as well. -This stage is also known as the _provision_ step. Packer fully supports -automated provisioning in order to install software onto the machines prior -to turning them into images. +In the previous page of this guide, you created your first image with Packer. +The image you just built, however, was basically just a repackaging of a +previously existing base AMI. The real utility of Packer comes from being able +to install and configure software into the images as well. This stage is also +known as the *provision* step. Packer fully supports automated provisioning in +order to install software onto the machines prior to turning them into images. -In this section, we're going to complete our image by installing -Redis on it. This way, the image we end up building actually contains -Redis pre-installed. 
Although Redis is a small, simple example, this should -give you an idea of what it may be like to install many more packages into -the image. +In this section, we're going to complete our image by installing Redis on it. +This way, the image we end up building actually contains Redis pre-installed. +Although Redis is a small, simple example, this should give you an idea of what +it may be like to install many more packages into the image. -Historically, pre-baked images have been frowned upon because changing -them has been so tedious and slow. Because Packer is completely automated, -including provisioning, images can be changed quickly and integrated with -modern configuration management tools such as Chef or Puppet. +Historically, pre-baked images have been frowned upon because changing them has +been so tedious and slow. Because Packer is completely automated, including +provisioning, images can be changed quickly and integrated with modern +configuration management tools such as Chef or Puppet. ## Configuring Provisioners Provisioners are configured as part of the template. We'll use the built-in shell provisioner that comes with Packer to install Redis. Modify the -`example.json` template we made previously and add the following. We'll -explain the various parts of the new configuration following the code -block below. +`example.json` template we made previously and add the following. We'll explain +the various parts of the new configuration following the code block below. -```javascript +``` {.javascript} { "variables": ["..."], "builders": ["..."], @@ -53,51 +55,51 @@ block below. } ``` --> **Note:** The `sleep 30` in the example above is -very important. Because Packer is able to detect and SSH into the instance -as soon as SSH is available, Ubuntu actually doesn't get proper amounts -of time to initialize. The sleep makes sure that the OS properly initializes. +-> **Note:** The `sleep 30` in the example above is very important. Because +Packer is able to detect and SSH into the instance as soon as SSH is available, +Ubuntu actually doesn't get proper amounts of time to initialize. The sleep +makes sure that the OS properly initializes. -Hopefully it is obvious, but the `builders` section shouldn't actually -contain "...", it should be the contents setup in the previous page -of the getting started guide. Also note the comma after the `"builders": [...]` -section, which was not present in the previous lesson. +Hopefully it is obvious, but the `builders` section shouldn't actually contain +"...", it should be the contents setup in the previous page of the getting +started guide. Also note the comma after the `"builders": [...]` section, which +was not present in the previous lesson. To configure the provisioners, we add a new section `provisioners` to the -template, alongside the `builders` configuration. The provisioners section -is an array of provisioners to run. If multiple provisioners are specified, they -are run in the order given. +template, alongside the `builders` configuration. The provisioners section is an +array of provisioners to run. If multiple provisioners are specified, they are +run in the order given. -By default, each provisioner is run for every builder defined. So if we had -two builders defined in our template, such as both Amazon and DigitalOcean, then -the shell script would run as part of both builds. There are ways to restrict +By default, each provisioner is run for every builder defined. 
So if we had two +builders defined in our template, such as both Amazon and DigitalOcean, then the +shell script would run as part of both builds. There are ways to restrict provisioners to certain builds, but it is outside the scope of this getting started guide. It is covered in more detail in the complete [documentation](/docs). -The one provisioner we defined has a type of `shell`. This provisioner -ships with Packer and runs shell scripts on the running machine. In our -case, we specify two inline commands to run in order to install Redis. +The one provisioner we defined has a type of `shell`. This provisioner ships +with Packer and runs shell scripts on the running machine. In our case, we +specify two inline commands to run in order to install Redis. ## Build With the provisioner configured, give it a pass once again through `packer validate` to verify everything is okay, then build it using -`packer build example.json`. The output should look similar to when you -built your first image, except this time there will be a new step where -the provisioning is run. +`packer build example.json`. The output should look similar to when you built +your first image, except this time there will be a new step where the +provisioning is run. -The output from the provisioner is too verbose to include in this -guide, since it contains all the output from the shell scripts. But you -should see Redis successfully install. After that, Packer once again -turns the machine into an AMI. +The output from the provisioner is too verbose to include in this guide, since +it contains all the output from the shell scripts. But you should see Redis +successfully install. After that, Packer once again turns the machine into an +AMI. If you were to launch this AMI, Redis would be pre-installed. Cool! This is just a basic example. In a real world use case, you may be provisioning -an image with the entire stack necessary to run your application. Or maybe -just the web stack so that you can have an image for web servers pre-built. -This saves tons of time later as you launch these images since everything -is pre-installed. Additionally, since everything is pre-installed, you -can test the images as they're built and know that when they go into -production, they'll be functional. +an image with the entire stack necessary to run your application. Or maybe just +the web stack so that you can have an image for web servers pre-built. This +saves tons of time later as you launch these images since everything is +pre-installed. Additionally, since everything is pre-installed, you can test the +images as they're built and know that when they go into production, they'll be +functional. diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index e5d1b48ff..f37a5a5ad 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -1,23 +1,41 @@ --- -layout: "intro" -page_title: "Remote Builds and Storage" -prev_url: "/intro/getting-started/vagrant.html" -next_url: "/intro/getting-started/next.html" -next_title: "Next Steps" -description: |- - Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use Atlas by HashiCorp to both run Packer builds remotely and store the output of builds. 
---- +description: | + Up to this point in the guide, you have been running Packer on your local + machine to build and provision images on AWS and DigitalOcean. However, you can + use Atlas by HashiCorp to both run Packer builds remotely and store the output + of builds. +layout: intro +next_title: Next Steps +next_url: '/intro/getting-started/next.html' +page_title: Remote Builds and Storage +prev_url: '/intro/getting-started/vagrant.html' +... # Remote Builds and Storage -Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds remotely and store the output of builds. + +Up to this point in the guide, you have been running Packer on your local +machine to build and provision images on AWS and DigitalOcean. However, you can +use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds +remotely and store the output of builds. ## Why Build Remotely? -By building remotely, you can move access credentials off of developer machines, release local machines from long-running Packer processes, and automatically start Packer builds from trigger sources such as `vagrant push`, a version control system, or CI tool. + +By building remotely, you can move access credentials off of developer machines, +release local machines from long-running Packer processes, and automatically +start Packer builds from trigger sources such as `vagrant push`, a version +control system, or CI tool. ## Run Packer Builds Remotely -To run Packer remotely, there are two changes that must be made to the Packer template. The first is the addition of the `push` [configuration](https://www.packer.io/docs/templates/push.html), which sends the Packer template to Atlas so it can run Packer remotely. The second modification is updating the variables section to read variables from the Atlas environment rather than the local environment. Remove the `post-processors` section for now if it is still in your template. -```javascript +To run Packer remotely, there are two changes that must be made to the Packer +template. The first is the addition of the `push` +[configuration](https://www.packer.io/docs/templates/push.html), which sends the +Packer template to Atlas so it can run Packer remotely. The second modification +is updating the variables section to read variables from the Atlas environment +rather than the local environment. Remove the `post-processors` section for now +if it is still in your template. + +``` {.javascript} { "variables": { "aws_access_key": "{{env `aws_access_key`}}", @@ -45,31 +63,35 @@ To run Packer remotely, there are two changes that must be made to the Packer te "name": "ATLAS_USERNAME/packer-tutorial" } } -``` - -To get an Atlas username, [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). Replace "ATLAS_USERNAME" with your username, then run `packer push -create example.json` to send the configuration to Atlas, which automatically starts the build. - -This build will fail since neither `aws_access_key` or `aws_secret_key` are set in the Atlas environment. To set environment variables in Atlas, navigate to the [operations tab](https://atlas.hashicorp.com/operations), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. 
Set `aws_access_key` and `aws_secret_key` with their respective values. Now restart the Packer build by either clicking 'rebuild' in the Atlas UI or by running `packer push example.json` again. Now when you click on the active build, you can view the logs in real-time.

--> **Note:** Whenever a change is made to the Packer template, you must `packer push` to update the configuration in Atlas.
-
-## Store Packer Outputs
-Now we have Atlas building an AMI with Redis pre-configured. This is great, but it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple:
-
- ```javascript
-{
-  "variables": ["..."],
-  "builders": ["..."],
-  "provisioners": ["..."],
-  "push": ["..."],
-  "post-processors": [
-    {
-      "type": "atlas",
-      "artifact": "ATLAS_USERNAME/packer-tutorial",
-      "artifact_type": "amazon.ami"
-    }
-  ]
-}
```
-Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas.
\ No newline at end of file
+To get an Atlas username, [create an account
+here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer).
+Replace "ATLAS\_USERNAME" with your username, then run
+`packer push -create example.json` to send the configuration to Atlas, which
+automatically starts the build.
+
+This build will fail since neither `aws_access_key` nor `aws_secret_key` is set
+in the Atlas environment. To set environment variables in Atlas, navigate to the
+[operations tab](https://atlas.hashicorp.com/operations), click the
+"packer-tutorial" build configuration that was just created, and then click
+'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key`
+with their respective values. Now restart the Packer build by either clicking
+'rebuild' in the Atlas UI or by running `packer push example.json` again. Now
+when you click on the active build, you can view the logs in real-time.
+
+-> **Note:** Whenever a change is made to the Packer template, you must
+`packer push` to update the configuration in Atlas.
+
+## Store Packer Outputs
+
+Now we have Atlas building an AMI with Redis pre-configured. This is great, but
+it's even better to store and version the AMI output so it can be easily
+deployed by a tool like [Terraform](https://terraform.io). The `atlas`
+[post-processor](/docs/post-processors/atlas.html) makes this process simple:
+
+``` {.javascript}
+{
+  "variables": ["..."],
+  "builders": ["..."],
+  "provisioners": ["..."],
+  "push": ["..."],
+  "post-processors": [
+    {
+      "type": "atlas",
+      "artifact": "ATLAS_USERNAME/packer-tutorial",
+      "artifact_type": "amazon.ami"
+    }
+  ]
+}
+```
+
+Update the `post-processors` block with your Atlas username, then
+`packer push example.json` and watch the build kick off in Atlas! When the build
+completes, the resulting artifact will be saved and stored in Atlas.
diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index ae14c2748..a24d023e2 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -1,47 +1,51 @@ --- -layout: "intro" -page_title: "Install Packer" -prev_url: "/intro/platforms.html" -next_url: "/intro/getting-started/build-image.html" -next_title: "Build an Image" -description: |- - Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a binary package for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the README and is only recommended for advanced users. ---- +description: | + Packer must first be installed on the machine you want to run it on. To make + installation easy, Packer is distributed as a binary package for all supported + platforms and architectures. This page will not cover how to compile Packer from + source, as that is covered in the README and is only recommended for advanced + users. +layout: intro +next_title: Build an Image +next_url: '/intro/getting-started/build-image.html' +page_title: Install Packer +prev_url: '/intro/platforms.html' +... # Install Packer -Packer must first be installed on the machine you want to run it on. -To make installation easy, Packer is distributed as a [binary package](/downloads.html) -for all supported platforms and architectures. This page will not cover how -to compile Packer from source, as that is covered in the +Packer must first be installed on the machine you want to run it on. To make +installation easy, Packer is distributed as a [binary package](/downloads.html) +for all supported platforms and architectures. This page will not cover how to +compile Packer from source, as that is covered in the [README](https://github.com/mitchellh/packer/blob/master/README.md) and is only recommended for advanced users. ## Installing Packer -To install packer, first find the [appropriate package](/downloads.html) -for your system and download it. Packer is packaged as a "zip" file. +To install packer, first find the [appropriate package](/downloads.html) for +your system and download it. Packer is packaged as a "zip" file. Next, unzip the downloaded package into a directory where Packer will be installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good, -depending on whether you want to restrict the install to just your user -or install it system-wide. On Windows systems, you can put it wherever you'd -like. +depending on whether you want to restrict the install to just your user or +install it system-wide. On Windows systems, you can put it wherever you'd like. After unzipping the package, the directory should contain a set of binary -programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step -to installation is to make sure the directory you installed Packer to -is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) -for instructions on setting the PATH on Linux and Mac. -[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) +programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step to +installation is to make sure the directory you installed Packer to is on the +PATH. 
See [this +page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) +for instructions on setting the PATH on Linux and Mac. [This +page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) contains instructions for setting the PATH on Windows. ## Verifying the Installation -After installing Packer, verify the installation worked by opening -a new command prompt or console, and checking that `packer` is available: +After installing Packer, verify the installation worked by opening a new command +prompt or console, and checking that `packer` is available: -```text +``` {.text} $ packer usage: packer [--version] [--help] [] @@ -54,21 +58,21 @@ Available commands are: version Prints the Packer version ``` -If you get an error that `packer` could not be found, then your PATH -environment variable was not setup properly. Please go back and ensure -that your PATH variable contains the directory which has Packer installed. +If you get an error that `packer` could not be found, then your PATH environment +variable was not setup properly. Please go back and ensure that your PATH +variable contains the directory which has Packer installed. Otherwise, Packer is installed and you're ready to go! ## Alternative Installation Methods -While the binary packages is the only official method of installation, there -are alternatives available. +While the binary packages is the only official method of installation, there are +alternatives available. ### Homebrew If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: -```text +``` {.text} $ brew install packer ``` diff --git a/website/source/intro/getting-started/vagrant.html.markdown b/website/source/intro/getting-started/vagrant.html.markdown index 4d6e20caf..c671095e7 100644 --- a/website/source/intro/getting-started/vagrant.html.markdown +++ b/website/source/intro/getting-started/vagrant.html.markdown @@ -1,33 +1,34 @@ --- -layout: "intro" -page_title: "Vagrant Boxes" -prev_url: "/intro/getting-started/parallel-builds.html" -next_url: "/intro/getting-started/remote-builds.html" -next_title: "Remote Builds and Storage" -description: |- - Packer also has the ability to take the results of a builder (such as an AMI or plain VMware image) and turn it into a Vagrant box. ---- +description: | + Packer also has the ability to take the results of a builder (such as an AMI or + plain VMware image) and turn it into a Vagrant box. +layout: intro +next_title: Remote Builds and Storage +next_url: '/intro/getting-started/remote-builds.html' +page_title: Vagrant Boxes +prev_url: '/intro/getting-started/parallel-builds.html' +... # Vagrant Boxes -Packer also has the ability to take the results of a builder (such as -an AMI or plain VMware image) and turn it into a [Vagrant](http://www.vagrantup.com) -box. +Packer also has the ability to take the results of a builder (such as an AMI or +plain VMware image) and turn it into a [Vagrant](http://www.vagrantup.com) box. This is done using [post-processors](/docs/templates/post-processors.html). These take an artifact created by a previous builder or post-processor and transforms it into a new one. In the case of the Vagrant post-processor, it takes an artifact from a builder and transforms it into a Vagrant box file. -Post-processors are a generally very useful concept. While the example on -this getting-started page will be creating Vagrant images, post-processors -have many interesting use cases. 
For example, you can write a post-processor
-to compress artifacts, upload them, test them, etc.
+Post-processors are generally a very useful concept. While the example on this
+getting-started page will be creating Vagrant images, post-processors have many
+interesting use cases. For example, you can write a post-processor to compress
+artifacts, upload them, test them, etc.

-Let's modify our template to use the Vagrant post-processor to turn our
-AWS AMI into a Vagrant box usable with the [vagrant-aws plugin](https://github.com/mitchellh/vagrant-aws). If you followed along in the previous page and setup DigitalOcean,
-Packer can't currently make Vagrant boxes for DigitalOcean, but will be able
-to soon.
+Let's modify our template to use the Vagrant post-processor to turn our AWS AMI
+into a Vagrant box usable with the [vagrant-aws
+plugin](https://github.com/mitchellh/vagrant-aws). If you followed along in the
+previous page and set up DigitalOcean, note that Packer can't currently make
+Vagrant boxes for DigitalOcean, but will be able to soon.

 ## Enabling the Post-Processor

@@ -35,7 +36,7 @@ Post-processors are added in the `post-processors` section of a template, which
 we haven't created yet. Modify your `example.json` template and add the section.
 Your template should look like the following:

-```javascript
+``` {.javascript}
 {
   "builders": ["..."],
   "provisioners": ["..."],
@@ -44,8 +45,8 @@ Your template should look like the following:
 ```

 In this case, we're enabling a single post-processor named "vagrant". This
-post-processor is built-in to Packer and will create Vagrant boxes. You
-can always create [new post-processors](/docs/extend/post-processor.html), however.
+post-processor is built-in to Packer and will create Vagrant boxes. You can
+always create [new post-processors](/docs/extend/post-processor.html), however.
 The details on configuring post-processors are covered in the
 [post-processors](/docs/templates/post-processors.html) documentation.

@@ -53,27 +54,26 @@ Validate the configuration using `packer validate`.

 ## Using the Post-Processor

-Just run a normal `packer build` and it will now use the post-processor.
-Since Packer can't currently make a Vagrant box for DigitalOcean anyways,
-I recommend passing the `-only=amazon-ebs` flag to `packer build` so it only
-builds the AMI. The command should look like the following:
+Just run a normal `packer build` and it will now use the post-processor. Since
+Packer can't currently make a Vagrant box for DigitalOcean anyway, I recommend
+passing the `-only=amazon-ebs` flag to `packer build` so it only builds the AMI.
+The command should look like the following:

-```text
+``` {.text}
 $ packer build -only=amazon-ebs example.json
 ```

-As you watch the output, you'll notice at the end in the artifact listing
-that a Vagrant box was made (by default at `packer_aws.box` in the current
-directory). Success!
+As you watch the output, you'll notice at the end in the artifact listing that a
+Vagrant box was made (by default at `packer_aws.box` in the current directory).
+Success!

 But where did the AMI go? When using post-processors, Packer removes
-intermediary artifacts since they're usually not wanted. Only the final
-artifact is preserved. This behavior can be changed, of course. Changing
-this behavior is covered [in the documentation](/docs/templates/post-processors.html).
+intermediary artifacts since they're usually not wanted. Only the final artifact
+is preserved. This behavior can be changed, of course.
Changing this behavior is
+covered [in the documentation](/docs/templates/post-processors.html).

-Typically when removing intermediary artifacts, the actual underlying
-files or resources of the artifact are also removed. For example, when
-building a VMware image, if you turn it into a Vagrant box, the files of
-the VMware image will be deleted since they were compressed into the Vagrant
-box. With creating AWS images, however, the AMI is kept around, since Vagrant
-needs it to function.
+Typically when removing intermediary artifacts, the actual underlying files or
+resources of the artifact are also removed. For example, when building a VMware
+image, if you turn it into a Vagrant box, the files of the VMware image will be
+deleted since they were compressed into the Vagrant box. When creating AWS
+images, however, the AMI is kept around, since Vagrant needs it to function.
diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown
index 37c26b9ad..034d02a65 100644
--- a/website/source/intro/hashicorp-ecosystem.html.markdown
+++ b/website/source/intro/hashicorp-ecosystem.html.markdown
@@ -1,32 +1,63 @@
 ---
-layout: "intro"
-page_title: "Packer and the HashiCorp Ecosystem"
-prev_url: "/intro/platforms.html"
-next_url: "/intro/getting-started/setup.html"
-next_title: "Getting Started: Install Packer"
-description: |-
-  Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools
----
+description: Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools
+layout: intro
+next_title: 'Getting Started: Install Packer'
+next_url: '/intro/getting-started/setup.html'
+page_title: Packer and the HashiCorp Ecosystem
+prev_url: '/intro/platforms.html'
+...

 # Packer and the HashiCorp Ecosystem

-HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem).
+HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform,
+Serf, and Consul, and the commercial product Atlas. Packer is just one piece of
+the ecosystem HashiCorp has built to make application delivery a versioned,
+auditable, repeatable, and collaborative process. To learn more about our
+beliefs on the qualities of the modern datacenter and responsible application
+delivery, read [The Atlas Mindset: Version Control for
+Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem).

-If you are using Packer to build machine images and deployable artifacts, it's likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure.
+If you are using Packer to build machine images and deployable artifacts, it's
+likely that you need a solution for deploying those artifacts. Terraform is our
+tool for creating, combining, and modifying infrastructure.

-Below are summaries of HashiCorp's open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow.
+Below are summaries of HashiCorp's open source projects and a graphic showing +how Atlas connects them to create a full application delivery workflow. # HashiCorp Ecosystem + ![Atlas Workflow](docs/atlas-workflow.png) -[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul to make application delivery a versioned, auditable, repeatable, and collaborative process. +[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul +to make application delivery a versioned, auditable, repeatable, and +collaborative process. -[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating machine images and deployable artifacts such as AMIs, OpenStack images, Docker containers, etc. +[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for creating machine images and deployable artifacts such as +AMIs, OpenStack images, Docker containers, etc. -[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating, combining, and modifying infrastructure. In the Atlas workflow Terraform reads from the artifact registry and provisions infrastructure. +[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for creating, combining, and modifying infrastructure. In +the Atlas workflow Terraform reads from the artifact registry and provisions +infrastructure. -[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime. +[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for service discovery, service registry, and health checks. +In the Atlas workflow Consul is configured at the Packer build stage and +identifies the service(s) contained in each artifact. Since Consul is configured +at the build phase with Packer, when the artifact is deployed with Terraform, it +is fully configured with dependencies and service discovery pre-baked. This +greatly reduces the risk of an unhealthy node in production due to configuration +failure at runtime. -[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf's gossip protocol as the foundation for service discovery. +[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is +a HashiCorp tool for cluster membership and failure detection. Consul uses +Serf's gossip protocol as the foundation for service discovery. -[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. 
Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production. +[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) +is a HashiCorp tool for managing development environments that mirror +production. Vagrant environments reduce the friction of developing a project and +reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes +can be built in parallel with production artifacts with Packer to maintain +parity between development and production. diff --git a/website/source/intro/index.html.markdown b/website/source/intro/index.html.markdown index 147cc51ee..c9abcebe4 100644 --- a/website/source/intro/index.html.markdown +++ b/website/source/intro/index.html.markdown @@ -1,31 +1,34 @@ --- -layout: "intro" -page_title: "Introduction" -prev_url: "#" -next_url: "/intro/why.html" -next_title: "Why Use Packer?" -description: |- - Welcome to the world of Packer! This introduction guide will show you what Packer is, explain why it exists, the benefits it has to offer, and how you can get started with it. If you're already familiar with Packer, the documentation provides more of a reference for all available features. ---- +description: | + Welcome to the world of Packer! This introduction guide will show you what + Packer is, explain why it exists, the benefits it has to offer, and how you can + get started with it. If you're already familiar with Packer, the documentation + provides more of a reference for all available features. +layout: intro +next_title: 'Why Use Packer?' +next_url: '/intro/why.html' +page_title: Introduction +prev_url: '# ' +... # Introduction to Packer Welcome to the world of Packer! This introduction guide will show you what -Packer is, explain why it exists, the benefits it has to offer, and how -you can get started with it. If you're already familiar with Packer, the +Packer is, explain why it exists, the benefits it has to offer, and how you can +get started with it. If you're already familiar with Packer, the [documentation](/docs) provides more of a reference for all available features. ## What is Packer? -Packer is an open source tool for creating identical machine images for multiple platforms -from a single source configuration. Packer is lightweight, runs on every major -operating system, and is highly performant, creating machine images for -multiple platforms in parallel. Packer does not replace configuration management -like Chef or Puppet. In fact, when building images, Packer is able to use tools -like Chef or Puppet to install software onto the image. +Packer is an open source tool for creating identical machine images for multiple +platforms from a single source configuration. Packer is lightweight, runs on +every major operating system, and is highly performant, creating machine images +for multiple platforms in parallel. Packer does not replace configuration +management like Chef or Puppet. In fact, when building images, Packer is able to +use tools like Chef or Puppet to install software onto the image. -A _machine image_ is a single static unit that contains a pre-configured operating -system and installed software which is used to quickly create new running machines. -Machine image formats change for each platform. 
Some examples include -[AMIs](http://en.wikipedia.org/wiki/Amazon_Machine_Image) for EC2, +A *machine image* is a single static unit that contains a pre-configured +operating system and installed software which is used to quickly create new +running machines. Machine image formats change for each platform. Some examples +include [AMIs](http://en.wikipedia.org/wiki/Amazon_Machine_Image) for EC2, VMDK/VMX files for VMware, OVF exports for VirtualBox, etc. diff --git a/website/source/intro/platforms.html.markdown b/website/source/intro/platforms.html.markdown index d97756fd7..586c0c4ec 100644 --- a/website/source/intro/platforms.html.markdown +++ b/website/source/intro/platforms.html.markdown @@ -1,65 +1,73 @@ --- -layout: "intro" -page_title: "Supported Platforms" -prev_url: "/intro/use-cases.html" -next_url: "/intro/hashicorp-ecosystem.html" -next_title: "Packer & the HashiCorp Ecosystem" -description: |- - Packer can create machine images for any platform. Packer ships with support for a set of platforms, but can be extended through plugins to support any platform. This page documents the list of supported image types that Packer supports creating. ---- +description: | + Packer can create machine images for any platform. Packer ships with support for + a set of platforms, but can be extended through plugins to support any platform. + This page documents the list of supported image types that Packer supports + creating. +layout: intro +next_title: 'Packer & the HashiCorp Ecosystem' +next_url: '/intro/hashicorp-ecosystem.html' +page_title: Supported Platforms +prev_url: '/intro/use-cases.html' +... # Supported Platforms -Packer can create machine images for any platform. Packer ships with -support for a set of platforms, but can be [extended through plugins](/docs/extend/builder.html) -to support any platform. This page documents the list of supported image -types that Packer supports creating. +Packer can create machine images for any platform. Packer ships with support for +a set of platforms, but can be [extended through +plugins](/docs/extend/builder.html) to support any platform. This page documents +the list of supported image types that Packer supports creating. -If you were looking to see what platforms Packer is able to run on, see -the page on [installing Packer](/intro/getting-started/setup.html). +If you were looking to see what platforms Packer is able to run on, see the page +on [installing Packer](/intro/getting-started/setup.html). --> **Note:** We're always looking to officially support more -target platforms. If you're interested in adding support for another -platform, please help by opening an issue or pull request within -[GitHub](https://github.com/mitchellh/packer) so we can discuss -how to make it happen. +-> **Note:** We're always looking to officially support more target +platforms. If you're interested in adding support for another platform, please +help by opening an issue or pull request within +[GitHub](https://github.com/mitchellh/packer) so we can discuss how to make it +happen. -Packer supports creating images for the following platforms or targets. -The format of the resulting image and any high-level information about the -platform is noted. They are listed in alphabetical order. For more detailed -information on supported configuration parameters and usage, please see -the appropriate [documentation page within the documentation section](/docs). +Packer supports creating images for the following platforms or targets. 
The +format of the resulting image and any high-level information about the platform +is noted. They are listed in alphabetical order. For more detailed information +on supported configuration parameters and usage, please see the appropriate +[documentation page within the documentation section](/docs). -* ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within +- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within [EC2](http://aws.amazon.com/ec2/), optionally distributed to multiple regions. -* ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) +- ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) that can be used to start a pre-configured DigitalOcean instance of any size. -* ***Docker***. Snapshots for [Docker](http://www.docker.io/) - that can be used to start a pre-configured Docker instance. +- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used + to start a pre-configured Docker instance. -* ***Google Compute Engine***. Snapshots for [Google Compute Engine](https://cloud.google.com/products/compute-engine) - that can be used to start a pre-configured Google Compute Engine instance. +- ***Google Compute Engine***. Snapshots for [Google Compute + Engine](https://cloud.google.com/products/compute-engine) that can be used to + start a pre-configured Google Compute Engine instance. -* ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) - that can be used to start pre-configured OpenStack servers. +- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can be + used to start pre-configured OpenStack servers. -* ***Parallels (PVM)***. Exported virtual machines for [Parallels](http://www.parallels.com/downloads/desktop/), - including virtual machine metadata such as RAM, CPUs, etc. These virtual - machines are portable and can be started on any platform Parallels runs on. +- ***Parallels (PVM)***. Exported virtual machines for + [Parallels](http://www.parallels.com/downloads/desktop/), including virtual + machine metadata such as RAM, CPUs, etc. These virtual machines are portable + and can be started on any platform Parallels runs on. -* ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or [Xen](http://www.xenproject.org/) - that can be used to start pre-configured KVM or Xen instances. +- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or + [Xen](http://www.xenproject.org/) that can be used to start pre-configured KVM + or Xen instances. -* ***VirtualBox (OVF)***. Exported virtual machines for [VirtualBox](https://www.virtualbox.org/), - including virtual machine metadata such as RAM, CPUs, etc. These virtual - machines are portable and can be started on any platform VirtualBox runs on. +- ***VirtualBox (OVF)***. Exported virtual machines for + [VirtualBox](https://www.virtualbox.org/), including virtual machine metadata + such as RAM, CPUs, etc. These virtual machines are portable and can be started + on any platform VirtualBox runs on. -* ***VMware (VMX)***. Exported virtual machines for [VMware](http://www.vmware.com/) - that can be run within any desktop products such as Fusion, Player, or - Workstation, as well as server products such as vSphere. +- ***VMware (VMX)***. Exported virtual machines for + [VMware](http://www.vmware.com/) that can be run within any desktop products + such as Fusion, Player, or Workstation, as well as server products such + as vSphere. 
-As previously mentioned, these are just the target image types that Packer -ships with out of the box. You can always [extend Packer through plugins](/docs/extend/builder.html) -to support more. +As previously mentioned, these are just the target image types that Packer ships +with out of the box. You can always [extend Packer through +plugins](/docs/extend/builder.html) to support more. diff --git a/website/source/intro/use-cases.html.markdown b/website/source/intro/use-cases.html.markdown index 0b73ea32c..2cd38d967 100644 --- a/website/source/intro/use-cases.html.markdown +++ b/website/source/intro/use-cases.html.markdown @@ -1,20 +1,24 @@ --- -layout: "intro" -page_title: "Use Cases" -prev_url: "/intro/why.html" -next_url: "/intro/platforms.html" -next_title: "Supported Platforms" -description: |- - By now you should know what Packer does and what the benefits of image creation are. In this section, we'll enumerate _some_ of the use cases for Packer. Note that this is not an exhaustive list by any means. There are definitely use cases for Packer not listed here. This list is just meant to give you an idea of how Packer may improve your processes. ---- +description: | + By now you should know what Packer does and what the benefits of image creation + are. In this section, we'll enumerate *some* of the use cases for Packer. Note + that this is not an exhaustive list by any means. There are definitely use cases + for Packer not listed here. This list is just meant to give you an idea of how + Packer may improve your processes. +layout: intro +next_title: Supported Platforms +next_url: '/intro/platforms.html' +page_title: Use Cases +prev_url: '/intro/why.html' +... # Use Cases -By now you should know what Packer does and what the benefits of image -creation are. In this section, we'll enumerate _some_ of the use cases -for Packer. Note that this is not an exhaustive list by any means. There are -definitely use cases for Packer not listed here. This list is just meant -to give you an idea of how Packer may improve your processes. +By now you should know what Packer does and what the benefits of image creation +are. In this section, we'll enumerate *some* of the use cases for Packer. Note +that this is not an exhaustive list by any means. There are definitely use cases +for Packer not listed here. This list is just meant to give you an idea of how +Packer may improve your processes. ### Continuous Delivery @@ -24,30 +28,31 @@ can be used to generate new machine images for multiple platforms on every change to Chef/Puppet. As part of this pipeline, the newly created images can then be launched and -tested, verifying the infrastructure changes work. If the tests pass, you can -be confident that that image will work when deployed. This brings a new level -of stability and testability to infrastructure changes. +tested, verifying the infrastructure changes work. If the tests pass, you can be +confident that that image will work when deployed. This brings a new level of +stability and testability to infrastructure changes. ### Dev/Prod Parity -Packer helps [keep development, staging, and production as similar as possible](http://www.12factor.net/dev-prod-parity). -Packer can be used to generate images for multiple platforms at the same time. -So if you use AWS for production and VMware (perhaps with [Vagrant](http://www.vagrantup.com)) -for development, you can generate both an AMI and a VMware machine using -Packer at the same time from the same template. 
+Packer helps [keep development, staging, and production as similar as +possible](http://www.12factor.net/dev-prod-parity). Packer can be used to +generate images for multiple platforms at the same time. So if you use AWS for +production and VMware (perhaps with [Vagrant](http://www.vagrantup.com)) for +development, you can generate both an AMI and a VMware machine using Packer at +the same time from the same template. Mix this in with the continuous delivery use case above, and you have a pretty -slick system for consistent work environments from development all the -way through to production. +slick system for consistent work environments from development all the way +through to production. ### Appliance/Demo Creation -Since Packer creates consistent images for multiple platforms in parallel, -it is perfect for creating [appliances](http://en.wikipedia.org/wiki/Software_appliance) -and disposable product demos. As your software changes, you can automatically -create appliances with the software pre-installed. Potential users can then -get started with your software by deploying it to the environment of their -choice. +Since Packer creates consistent images for multiple platforms in parallel, it is +perfect for creating +[appliances](http://en.wikipedia.org/wiki/Software_appliance) and disposable +product demos. As your software changes, you can automatically create appliances +with the software pre-installed. Potential users can then get started with your +software by deploying it to the environment of their choice. -Packaging up software with complex requirements has never been so easy. -Or enjoyable, if you ask me. +Packaging up software with complex requirements has never been so easy. Or +enjoyable, if you ask me. diff --git a/website/source/intro/why.html.markdown b/website/source/intro/why.html.markdown index 98de7855f..ee6b5ad9e 100644 --- a/website/source/intro/why.html.markdown +++ b/website/source/intro/why.html.markdown @@ -1,24 +1,29 @@ --- -layout: "intro" -page_title: "Why Use Packer?" -prev_url: "/intro/index.html" -next_url: "/intro/use-cases.html" -next_title: "Packer Use Cases" -description: |- - Pre-baked machine images have a lot of advantages, but most have been unable to benefit from them because images have been too tedious to create and manage. There were either no existing tools to automate the creation of machine images or they had too high of a learning curve. The result is that, prior to Packer, creating machine images threatened the agility of operations teams, and therefore aren't used, despite the massive benefits. ---- +description: | + Pre-baked machine images have a lot of advantages, but most have been unable to + benefit from them because images have been too tedious to create and manage. + There were either no existing tools to automate the creation of machine images + or they had too high of a learning curve. The result is that, prior to Packer, + creating machine images threatened the agility of operations teams, and + therefore aren't used, despite the massive benefits. +layout: intro +next_title: Packer Use Cases +next_url: '/intro/use-cases.html' +page_title: 'Why Use Packer?' +prev_url: '/intro/index.html' +... # Why Use Packer? -Pre-baked machine images have a lot of advantages, but most have been unable -to benefit from them because images have been too tedious to create and manage. -There were either no existing tools to automate the creation of machine images or -they had too high of a learning curve. 
The result is that, prior to Packer,
-creating machine images threatened the agility of operations teams, and therefore
-aren't used, despite the massive benefits.
+Pre-baked machine images have a lot of advantages, but most have been unable to
+benefit from them because images have been too tedious to create and manage.
+There were either no existing tools to automate the creation of machine images
+or they had too high of a learning curve. The result is that, prior to Packer,
+creating machine images threatened the agility of operations teams, and
+images therefore weren't used, despite the massive benefits.

-Packer changes all of this. Packer is easy to use and automates the creation
-of any type of machine image. It embraces modern configuration management by
+Packer changes all of this. Packer is easy to use and automates the creation of
+any type of machine image. It embraces modern configuration management by
 encouraging you to use a framework such as Chef or Puppet to install and
 configure the software within your Packer-made images.

@@ -28,25 +33,26 @@ untapped potential and opening new opportunities.
 ## Advantages of Using Packer

 ***Super fast infrastructure deployment***. Packer images allow you to launch
-completely provisioned and configured machines in seconds, rather than
-several minutes or hours. This benefits not only production, but development as well,
-since development virtual machines can also be launched in seconds, without waiting
-for a typically much longer provisioning time.
+completely provisioned and configured machines in seconds, rather than several
+minutes or hours. This benefits not only production, but development as well,
+since development virtual machines can also be launched in seconds, without
+waiting for a typically much longer provisioning time.

 ***Multi-provider portability***. Because Packer creates identical images for
-multiple platforms, you can run production in AWS, staging/QA in a private
-cloud like OpenStack, and development in desktop virtualization solutions
-such as VMware or VirtualBox. Each environment is running an identical
-machine image, giving ultimate portability.
+multiple platforms, you can run production in AWS, staging/QA in a private cloud
+like OpenStack, and development in desktop virtualization solutions such as
+VMware or VirtualBox. Each environment is running an identical machine image,
+giving ultimate portability.

-***Improved stability***. Packer installs and configures all the software for
-a machine at the time the image is built. If there are bugs in these scripts,
-they'll be caught early, rather than several minutes after a machine is launched.
+***Improved stability***. Packer installs and configures all the software for a
+machine at the time the image is built. If there are bugs in these scripts,
+they'll be caught early, rather than several minutes after a machine is
+launched.

 ***Greater testability***. After a machine image is built, that machine image
 can be quickly launched and smoke tested to verify that things appear to be
-working. If they are, you can be confident that any other machines launched
-from that image will function properly.
+working. If they are, you can be confident that any other machines launched from
+that image will function properly.

 Packer makes it extremely easy to take advantage of all these benefits.
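To make the reflowed setup instructions earlier in this patch concrete, installation boils down to download, unzip, and PATH. A minimal sketch for a Unix system — the URL and version below are placeholders, not a real release link:

``` {.text}
# Placeholder URL: fetch the real zip for your platform from the downloads page
$ curl -L -o packer.zip https://example.com/packer_X.Y.Z_linux_amd64.zip
$ mkdir -p ~/packer
$ unzip packer.zip -d ~/packer
# Put the unzipped binaries (packer, packer-build-amazon-ebs, ...) on the PATH
$ export PATH="$HOME/packer:$PATH"
$ packer
```

Appending the `export` line to a shell profile such as `~/.bashrc` makes the change permanent, which is what the Stack Overflow links in the setup page walk through.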
From c42e7cfe414b6c18ed2395c1f962bc37a2382880 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 19:41:42 -0700 Subject: [PATCH 674/956] Added note about installing pandoc if we can't find it --- website/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/Makefile b/website/Makefile index 1cc81038c..604e9c628 100644 --- a/website/Makefile +++ b/website/Makefile @@ -12,6 +12,6 @@ build: init format: bundle exec htmlbeautifier -t 2 source/*.erb bundle exec htmlbeautifier -t 2 source/layouts/*.erb + @pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content" pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true - From 555a8ba792d1ae7cc1a86d3d9971a684f221b2e1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 20:22:50 -0700 Subject: [PATCH 675/956] Change two blanks to one blank after numbered list item --- .../docs/extend/developing-plugins.html.markdown | 4 ++-- website/source/docs/extend/plugins.html.markdown | 6 +++--- .../docs/post-processors/atlas.html.markdown | 6 +++--- .../post-processors/vagrant-cloud.html.markdown | 16 ++++++++-------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown index 2ccdd437f..0d86df3d2 100644 --- a/website/source/docs/extend/developing-plugins.html.markdown +++ b/website/source/docs/extend/developing-plugins.html.markdown @@ -60,10 +60,10 @@ dependencies. There are two steps involved in creating a plugin: -1. Implement the desired interface. For example, if you're building a builder +1. Implement the desired interface. For example, if you're building a builder plugin, implement the `packer.Builder` interface. -2. Serve the interface by calling the appropriate plugin serving method in your +2. Serve the interface by calling the appropriate plugin serving method in your main method. In the case of a builder, this is `plugin.ServeBuilder`. A basic example is shown below. In this example, assume the `Builder` struct diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown index f8b800a30..98249de5d 100644 --- a/website/source/docs/extend/plugins.html.markdown +++ b/website/source/docs/extend/plugins.html.markdown @@ -51,12 +51,12 @@ Once the plugin is named properly, Packer automatically discovers plugins in the following directories in the given order. If a conflicting plugin is found later, it will take precedence over one found earlier. -1. The directory where `packer` is, or the executable directory. +1. The directory where `packer` is, or the executable directory. -2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` +2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on Windows. -3. The current working directory. +3. The current working directory. The valid types for plugins are: diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index c038a119a..18211c313 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -25,13 +25,13 @@ location in Atlas. 
Here is an example workflow: -1. Packer builds an AMI with the [Amazon AMI +1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html) -2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. +2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas. The `atlas` post-processor is configured with the name of the AMI, for example `hashicorp/foobar`, to create the artifact in Atlas or update the version if the artifact already exists -3. The new version is ready and available to be used in deployments with a tool +3. The new version is ready and available to be used in deployments with a tool like [Terraform](https://terraform.io) ## Configuration diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index e049552da..4891797e8 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -36,16 +36,16 @@ and deliver them to your team in some fashion. Here is an example workflow: -1. You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box +1. You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration -3. The post-processor receives the box from the `vagrant` post-processor -4. It then creates the configured version, or verifies the existence of it, on +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on Vagrant Cloud -5. A provider matching the name of the Vagrant provider is then created -6. The box is uploaded to Vagrant Cloud -7. The upload is verified -8. The version is released and available to users of the box +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. 
The version is released and available to users of the box ## Configuration From d8e8f98b322d6fde3d10534ddafbeed648c80066 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 22 Jul 2015 20:25:58 -0700 Subject: [PATCH 676/956] Change to 4 spaces --- website/Makefile | 2 +- website/source/community/index.html.markdown | 36 +- .../docs/basics/terminology.html.markdown | 64 +-- .../docs/builders/amazon-chroot.html.markdown | 162 ++++---- .../docs/builders/amazon-ebs.html.markdown | 220 +++++----- .../builders/amazon-instance.html.markdown | 271 ++++++------- .../source/docs/builders/amazon.html.markdown | 25 +- .../docs/builders/digitalocean.html.markdown | 52 +-- .../source/docs/builders/docker.html.markdown | 62 +-- .../docs/builders/openstack.html.markdown | 94 ++--- .../docs/builders/parallels-iso.html.markdown | 269 +++++++------ .../docs/builders/parallels-pvm.html.markdown | 177 ++++---- .../docs/builders/parallels.html.markdown | 18 +- .../source/docs/builders/qemu.html.markdown | 267 +++++++------ .../builders/virtualbox-iso.html.markdown | 318 +++++++-------- .../builders/virtualbox-ovf.html.markdown | 248 ++++++------ .../docs/builders/virtualbox.html.markdown | 19 +- .../docs/builders/vmware-iso.html.markdown | 378 +++++++++--------- .../docs/builders/vmware-vmx.html.markdown | 157 ++++---- .../source/docs/builders/vmware.html.markdown | 20 +- .../docs/command-line/build.html.markdown | 36 +- .../docs/command-line/fix.html.markdown | 2 +- .../machine-readable.html.markdown | 24 +- .../docs/command-line/push.html.markdown | 16 +- .../docs/command-line/validate.html.markdown | 4 +- .../extend/developing-plugins.html.markdown | 16 +- .../source/docs/extend/plugins.html.markdown | 20 +- .../docs/extend/post-processor.html.markdown | 16 +- .../command-build.html.markdown | 88 ++-- .../command-inspect.html.markdown | 24 +- .../command-version.html.markdown | 24 +- .../machine-readable/general.html.markdown | 8 +- .../docs/machine-readable/index.html.markdown | 8 +- .../other/core-configuration.html.markdown | 18 +- .../environmental-variables.html.markdown | 38 +- .../docs/post-processors/atlas.html.markdown | 51 +-- .../post-processors/compress.html.markdown | 22 +- .../docker-import.html.markdown | 4 +- .../post-processors/docker-push.html.markdown | 12 +- .../post-processors/docker-save.html.markdown | 2 +- .../post-processors/docker-tag.html.markdown | 8 +- .../vagrant-cloud.html.markdown | 62 +-- .../post-processors/vagrant.html.markdown | 50 +-- .../post-processors/vsphere.html.markdown | 39 +- .../provisioners/ansible-local.html.markdown | 107 +++-- .../provisioners/chef-client.html.markdown | 113 +++--- .../docs/provisioners/chef-solo.html.markdown | 127 +++--- .../docs/provisioners/file.html.markdown | 22 +- .../provisioners/powershell.html.markdown | 77 ++-- .../puppet-masterless.html.markdown | 113 +++--- .../provisioners/puppet-server.html.markdown | 50 +-- .../salt-masterless.html.markdown | 40 +- .../docs/provisioners/shell.html.markdown | 132 +++--- .../configuration-templates.html.markdown | 46 ++- .../docs/templates/introduction.html.markdown | 59 +-- .../source/docs/templates/push.html.markdown | 34 +- website/source/intro/platforms.html.markdown | 54 +-- 57 files changed, 2252 insertions(+), 2173 deletions(-) diff --git a/website/Makefile b/website/Makefile index 604e9c628..af5f71039 100644 --- a/website/Makefile +++ b/website/Makefile @@ -13,5 +13,5 @@ format: bundle exec htmlbeautifier -t 2 source/*.erb bundle exec htmlbeautifier -t 2 source/layouts/*.erb 
@pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content" - pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=2 --atx-headers -s --columns=80 {} > {}.new"\; || true + pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=4 --atx-headers -s --columns=80 {} > {}.new"\; || true pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index f4069fbdf..3951e909f 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -29,7 +29,8 @@ list as contributors come and go.
    [contributor-card HTML markup: the tags were lost in extraction, so only
    the hunk headers and the visible text of each card are kept below]

    Mitchell Hashimoto (@mitchellh)

@@ -41,9 +42,11 @@ list as contributors come and go.
    described as "automation obsessed."

    Jack Pearkes (@pearkes)

@@ -52,9 +55,11 @@ list as contributors come and go.
    for Packer. Outside of Packer, Jack is an avid open source contributor
    and software consultant.

    Mark Peek (@markpeek)

@@ -65,9 +70,11 @@ list as contributors come and go.
    IronPort Python libraries. Mark is also a FreeBSD committer.

    Ross Smith II (@rasa)

@@ -78,9 +85,11 @@ VMware builder on Windows, and provides other valuable
    assistance. Ross is an open source enthusiast, published author, and
    freelance consultant.

    Rickard von Essen (@rickard-von-essen)

@@ -90,8 +99,11 @@ Rickard von Essen maintains our Parallels Desktop builder.
    Rickard is a polyglot programmer and consults on Continuous Delivery.
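The two-step plugin recipe in the developing-plugins hunk earlier in this series — implement the desired interface, then serve it from `main` — is easier to see as code. A minimal sketch of a builder plugin, assuming a `Builder` type elsewhere in the same package that implements `packer.Builder`:

``` {.go}
package main

import (
	"github.com/mitchellh/packer/packer/plugin"
)

func main() {
	// Hand our Builder implementation to Packer's plugin-serving method.
	plugin.ServeBuilder(new(Builder))
}
```

Compiled to a properly named binary and dropped into one of the discovery locations listed in the plugins page (for example `~/.packer.d/plugins`), it is picked up automatically.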
    diff --git a/website/source/docs/basics/terminology.html.markdown b/website/source/docs/basics/terminology.html.markdown index 800478143..b20220b5c 100644 --- a/website/source/docs/basics/terminology.html.markdown +++ b/website/source/docs/basics/terminology.html.markdown @@ -17,41 +17,41 @@ Luckily, there are relatively few. This page documents all the terminology required to understand and use Packer. The terminology is in alphabetical order for easy referencing. -- `Artifacts` are the results of a single build, and are usually a set of IDs or - files to represent a machine image. Every builder produces a single artifact. - As an example, in the case of the Amazon EC2 builder, the artifact is a set of - AMI IDs (one per region). For the VMware builder, the artifact is a directory - of files comprising the created virtual machine. +- `Artifacts` are the results of a single build, and are usually a set of IDs + or files to represent a machine image. Every builder produces a + single artifact. As an example, in the case of the Amazon EC2 builder, the + artifact is a set of AMI IDs (one per region). For the VMware builder, the + artifact is a directory of files comprising the created virtual machine. -- `Builds` are a single task that eventually produces an image for a - single platform. Multiple builds run in parallel. Example usage in a sentence: - "The Packer build produced an AMI to run our web application." Or: "Packer is - running the builds now for VMware, AWS, and VirtualBox." +- `Builds` are a single task that eventually produces an image for a + single platform. Multiple builds run in parallel. Example usage in a + sentence: "The Packer build produced an AMI to run our web application." Or: + "Packer is running the builds now for VMware, AWS, and VirtualBox." -- `Builders` are components of Packer that are able to create a machine image - for a single platform. Builders read in some configuration and use that to run - and generate a machine image. A builder is invoked as part of a build in order - to create the actual resulting images. Example builders include VirtualBox, - VMware, and Amazon EC2. Builders can be created and added to Packer in the - form of plugins. +- `Builders` are components of Packer that are able to create a machine image + for a single platform. Builders read in some configuration and use that to + run and generate a machine image. A builder is invoked as part of a build in + order to create the actual resulting images. Example builders include + VirtualBox, VMware, and Amazon EC2. Builders can be created and added to + Packer in the form of plugins. -- `Commands` are sub-commands for the `packer` program that perform some job. An - example command is "build", which is invoked as `packer build`. Packer ships - with a set of commands out of the box in order to define its - command-line interface. Commands can also be created and added to Packer in - the form of plugins. +- `Commands` are sub-commands for the `packer` program that perform some job. + An example command is "build", which is invoked as `packer build`. Packer + ships with a set of commands out of the box in order to define its + command-line interface. Commands can also be created and added to Packer in + the form of plugins. -- `Post-processors` are components of Packer that take the result of a builder - or another post-processor and process that to create a new artifact. Examples - of post-processors are compress to compress artifacts, upload to upload - artifacts, etc. 
+- `Post-processors` are components of Packer that take the result of a builder + or another post-processor and process that to create a new artifact. + Examples of post-processors are compress to compress artifacts, upload to + upload artifacts, etc. -- `Provisioners` are components of Packer that install and configure software - within a running machine prior to that machine being turned into a - static image. They perform the major work of making the image contain - useful software. Example provisioners include shell scripts, Chef, - Puppet, etc. +- `Provisioners` are components of Packer that install and configure software + within a running machine prior to that machine being turned into a + static image. They perform the major work of making the image contain + useful software. Example provisioners include shell scripts, Chef, + Puppet, etc. -- `Templates` are JSON files which define one or more builds by configuring the - various components of Packer. Packer is able to read a template and use that - information to create multiple machine images in parallel. +- `Templates` are JSON files which define one or more builds by configuring + the various components of Packer. Packer is able to read a template and use + that information to create multiple machine images in parallel. diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index c3e16a982..2826e67ab 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -60,98 +60,100 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables - aren't set and Packer is running on an EC2 instance, Packer will check the - instance metadata for IAM role keys. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `secret_key` (string) - The secret key used to communicate with AWS. 
If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. If the environmental variables - aren't set and Packer is running on an EC2 instance, Packer will check the - instance metadata for IAM role keys. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. If the environmental variables + aren't set and Packer is running on an EC2 instance, Packer will check the + instance metadata for IAM role keys. -- `source_ami` (string) - The source AMI whose root volume will be copied and - provisioned on the currently running instance. This must be an EBS-backed AMI - with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed + AMI with a root volume snapshot that you have access to. ### Optional: -- `ami_description` (string) - The description to set for the resulting AMI(s). - By default this description is empty. +- `ami_description` (string) - The description to set for the + resulting AMI(s). By default this description is empty. -- `ami_groups` (array of strings) - A list of groups that have access to launch - the resulting AMI(s). By default no groups have permission to launch the AMI. - `all` will make the AMI publicly accessible. +- `ami_groups` (array of strings) - A list of groups that have access to + launch the resulting AMI(s). By default no groups have permission to launch + the AMI. `all` will make the AMI publicly accessible. -- `ami_product_codes` (array of strings) - A list of product codes to associate - with the AMI. By default no product codes are associated with the AMI. +- `ami_product_codes` (array of strings) - A list of product codes to + associate with the AMI. By default no product codes are associated with + the AMI. -- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags - and attributes are copied along with the AMI. AMI copying takes time depending - on the size of the AMI, but will generally take many minutes. +- `ami_regions` (array of strings) - A list of regions to copy the AMI to. + Tags and attributes are copied along with the AMI. AMI copying takes time + depending on the size of the AMI, but will generally take many minutes. -- `ami_users` (array of strings) - A list of account IDs that have access to - launch the resulting AMI(s). By default no additional users other than the - user creating the AMI has permissions to launch it. +- `ami_users` (array of strings) - A list of account IDs that have access to + launch the resulting AMI(s). By default no additional users other than the + user creating the AMI has permissions to launch it. -- `ami_virtualization_type` (string) - The type of virtualization for the AMI - you are building. This option is required to register HVM images. Can be - "paravirtual" (default) or "hvm". +- `ami_virtualization_type` (string) - The type of virtualization for the AMI + you are building. This option is required to register HVM images. 
Can be + "paravirtual" (default) or "hvm". -- `chroot_mounts` (array of array of strings) - This is a list of additional - devices to mount into the chroot environment. This configuration parameter - requires some additional documentation which is in the "Chroot Mounts" - section below. Please read that section for more information on how to - use this. +- `chroot_mounts` (array of array of strings) - This is a list of additional + devices to mount into the chroot environment. This configuration parameter + requires some additional documentation which is in the "Chroot Mounts" + section below. Please read that section for more information on how to + use this. -- `command_wrapper` (string) - How to run shell commands. This defaults - to "{{.Command}}". This may be useful to set if you want to set environmental - variables or perhaps run it with `sudo` or so on. This is a configuration - template where the `.Command` variable is replaced with the command to be run. +- `command_wrapper` (string) - How to run shell commands. This defaults + to "{{.Command}}". This may be useful to set if you want to set + environmental variables or perhaps run it with `sudo` or so on. This is a + configuration template where the `.Command` variable is replaced with the + command to be run. -- `copy_files` (array of strings) - Paths to files on the running EC2 instance - that will be copied into the chroot environment prior to provisioning. This is - useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. +- `copy_files` (array of strings) - Paths to files on the running EC2 instance + that will be copied into the chroot environment prior to provisioning. This + is useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work. -- `device_path` (string) - The path to the device where the root volume of the - source AMI will be attached. This defaults to "" (empty string), which forces - Packer to find an open device automatically. +- `device_path` (string) - The path to the device where the root volume of the + source AMI will be attached. This defaults to "" (empty string), which + forces Packer to find an open device automatically. -- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) - on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS - IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced + networking (SriovNetSupport) on HVM-compatible AMIs. If true, add + `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -- `force_deregister` (boolean) - Force Packer to first deregister an existing - AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -- `mount_path` (string) - The path where the volume will be mounted. This is - where the chroot environment will be. This defaults to - `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template - where the `.Device` variable is replaced with the name of the device where the - volume is attached. +- `mount_path` (string) - The path where the volume will be mounted. This is + where the chroot environment will be. This defaults to + `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template + where the `.Device` variable is replaced with the name of the device where + the volume is attached. 
-- `mount_options` (array of strings) - Options to supply the `mount` command - when mounting devices. Each option will be prefixed with `-o` and supplied to - the `mount` command ran by Packer. Because this command is ran in a shell, - user discrestion is advised. See [this manual page for the mount - command](http://linuxcommand.org/man_pages/mount8.html) for valid file system - specific options +- `mount_options` (array of strings) - Options to supply the `mount` command + when mounting devices. Each option will be prefixed with `-o` and supplied + to the `mount` command ran by Packer. Because this command is ran in a + shell, user discrestion is advised. See [this manual page for the mount + command](http://linuxcommand.org/man_pages/mount8.html) for valid file + system specific options -- `root_volume_size` (integer) - The size of the root volume for the chroot - environment, and the resulting AMI +- `root_volume_size` (integer) - The size of the root volume for the chroot + environment, and the resulting AMI -- `tags` (object of key/value strings) - Tags applied to the AMI. +- `tags` (object of key/value strings) - Tags applied to the AMI. ## Basic Example @@ -173,11 +175,11 @@ The `chroot_mounts` configuration can be used to mount additional devices within the chroot. By default, the following additional mounts are added into the chroot by Packer: -- `/proc` (proc) -- `/sys` (sysfs) -- `/dev` (bind to real `/dev`) -- `/dev/pts` (devpts) -- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) +- `/proc` (proc) +- `/sys` (sysfs) +- `/dev` (bind to real `/dev`) +- `/dev/pts` (devpts) +- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) These default mounts are usually good enough for anyone and are sane defaults. However, if you want to change or add the mount points, you may using the @@ -195,12 +197,12 @@ However, if you want to change or add the mount points, you may using the `chroot_mounts` is a list of a 3-tuples of strings. The three components of the 3-tuple, in order, are: -- The filesystem type. If this is "bind", then Packer will properly bind the - filesystem to another mount point. +- The filesystem type. If this is "bind", then Packer will properly bind the + filesystem to another mount point. -- The source device. +- The source device. -- The mount directory. +- The mount directory. ## Parallelism diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index cb6b7c9d5..34b84a06b 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -40,162 +40,164 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. 
To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `instance_type` (string) - The EC2 instance type to use while building the - AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -- `region` (string) - The name of the region, such as "us-east-1", in which to - launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -- `secret_key` (string) - The secret key used to communicate with AWS. If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -- `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -- `ssh_username` (string) - The username to use in order to communicate over SSH - to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. ### Optional: -- `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: +- `ami_block_device_mappings` (array of block device mappings) - Add the block + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) - The device name exposed to the instance (for +- `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) - The virtual device name. See the documentation on +- `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) for more information - - `snapshot_id` (string) - The ID of the snapshot - - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) +- `snapshot_id` (string) - The ID of the snapshot +- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) - The size of the volume, in GiB. Required if not +- `volume_size` (integer) - The size of the volume, in GiB. 
Required if not
   specifying a `snapshot_id`
-  - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
+- `delete_on_termination` (boolean) - Indicates whether the EBS volume is
   deleted on instance termination
-  - `encrypted` (boolean) - Indicates whether to encrypt the volume or not
-  - `no_device` (boolean) - Suppresses the specified device included in the
+- `encrypted` (boolean) - Indicates whether to encrypt the volume or not
+- `no_device` (boolean) - Suppresses the specified device included in the
   block device mapping of the AMI
-  - `iops` (integer) - The number of I/O operations per second (IOPS) that the
+- `iops` (integer) - The number of I/O operations per second (IOPS) that the
   volume supports. See the documentation on
   [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
   for more information

-- `ami_description` (string) - The description to set for the resulting AMI(s).
-  By default this description is empty.
+- `ami_description` (string) - The description to set for the
+  resulting AMI(s). By default this description is empty.

-- `ami_groups` (array of strings) - A list of groups that have access to launch
-  the resulting AMI(s). By default no groups have permission to launch the AMI.
-  `all` will make the AMI publicly accessible. AWS currently doesn't accept any
-  value other than "all".
+- `ami_groups` (array of strings) - A list of groups that have access to
+  launch the resulting AMI(s). By default no groups have permission to launch
+  the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
+  accept any value other than "all".

-- `ami_product_codes` (array of strings) - A list of product codes to associate
-  with the AMI. By default no product codes are associated with the AMI.
+- `ami_product_codes` (array of strings) - A list of product codes to
+  associate with the AMI. By default no product codes are associated with
+  the AMI.

-- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags
-  and attributes are copied along with the AMI. AMI copying takes time depending
-  on the size of the AMI, but will generally take many minutes.
+- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
+  Tags and attributes are copied along with the AMI. AMI copying takes time
+  depending on the size of the AMI, but will generally take many minutes.

-- `ami_users` (array of strings) - A list of account IDs that have access to
-  launch the resulting AMI(s). By default no additional users other than the
-  user creating the AMI has permissions to launch it.
+- `ami_users` (array of strings) - A list of account IDs that have access to
+  launch the resulting AMI(s). By default no additional users other than the
+  user creating the AMI have permissions to launch it.

-- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
-  IP addresses are not provided by default. If this is toggled, your new
-  instance will get a Public IP.
+- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
+  IP addresses are not provided by default. If this is toggled, your new
+  instance will get a Public IP.

-- `availability_zone` (string) - Destination availability zone to launch
-  instance in. Leave this empty to allow Amazon to auto-assign.
+- `availability_zone` (string) - Destination availability zone to launch
+  instance in. Leave this empty to allow Amazon to auto-assign.
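To make the block device mapping keys listed earlier in this section concrete, here is a hedged sketch of an `ami_block_device_mappings` value; the device names and sizes are invented for illustration:

```json
{
  "ami_block_device_mappings": [
    {
      "device_name": "/dev/sdb",
      "virtual_name": "ephemeral0"
    },
    {
      "device_name": "/dev/sdh",
      "volume_type": "gp2",
      "volume_size": 50,
      "delete_on_termination": true
    }
  ]
}
```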
-- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) - on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS - IAM policy. +- `enhanced_networking` (boolean) - Enable enhanced + networking (SriovNetSupport) on HVM-compatible AMIs. If true, add + `ec2:ModifyInstanceAttribute` to your AWS IAM policy. -- `force_deregister` (boolean) - Force Packer to first deregister an existing - AMI if one with the same name already exists. Default `false`. +- `force_deregister` (boolean) - Force Packer to first deregister an existing + AMI if one with the same name already exists. Default `false`. -- `iam_instance_profile` (string) - The name of an [IAM instance - profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) - to launch the EC2 instance with. +- `iam_instance_profile` (string) - The name of an [IAM instance + profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) + to launch the EC2 instance with. -- `launch_block_device_mappings` (array of block device mappings) - Add the - block device mappings to the launch instance. The block device mappings are - the same as `ami_block_device_mappings` above. +- `launch_block_device_mappings` (array of block device mappings) - Add the + block device mappings to the launch instance. The block device mappings are + the same as `ami_block_device_mappings` above. -- `run_tags` (object of key/value strings) - Tags to apply to the instance that - is *launched* to create the AMI. These tags are *not* applied to the resulting - AMI unless they're duplicated in `tags`. +- `run_tags` (object of key/value strings) - Tags to apply to the instance + that is *launched* to create the AMI. These tags are *not* applied to the + resulting AMI unless they're duplicated in `tags`. -- `security_group_id` (string) - The ID (*not* the name) of the security group - to assign to the instance. By default this is not set and Packer will - automatically create a new temporary security group to allow SSH access. Note - that if this is specified, you must be sure the security group allows access - to the `ssh_port` given below. +- `security_group_id` (string) - The ID (*not* the name) of the security group + to assign to the instance. By default this is not set and Packer will + automatically create a new temporary security group to allow SSH access. + Note that if this is specified, you must be sure the security group allows + access to the `ssh_port` given below. -- `security_group_ids` (array of strings) - A list of security groups as - described above. Note that if this is specified, you must omit the - `security_group_id`. +- `security_group_ids` (array of strings) - A list of security groups as + described above. Note that if this is specified, you must omit the + `security_group_id`. -- `spot_price` (string) - The maximum hourly price to pay for a spot instance to - create the AMI. Spot instances are a type of instance that EC2 starts when the - current spot price is less than the maximum price you specify. Spot price will - be updated based on available spot instance capacity and current spot - instance requests. It may save you some costs. You can set this to "auto" for - Packer to automatically discover the best spot price. +- `spot_price` (string) - The maximum hourly price to pay for a spot instance + to create the AMI. Spot instances are a type of instance that EC2 starts + when the current spot price is less than the maximum price you specify. 
Spot
+  price will be updated based on available spot instance capacity and current
+  spot instance requests. It may reduce your costs. You can set this to
+  "auto" for Packer to automatically discover the best spot price.

-- `spot_price_auto_product` (string) - Required if `spot_price` is set
-  to "auto". This tells Packer what sort of AMI you're launching to find the
-  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
-  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
+- `spot_price_auto_product` (string) - Required if `spot_price` is set
+  to "auto". This tells Packer what sort of AMI you're launching to find the
+  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
+  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

-- `ssh_keypair_name` (string) - If specified, this is the key that will be used
-  for SSH with the machine. By default, this is blank, and Packer will generate
-  a temporary keypair. `ssh_private_key_file` must be specified with this.
+- `ssh_keypair_name` (string) - If specified, this is the key that will be
+  used for SSH with the machine. By default, this is blank, and Packer will
+  generate a temporary keypair. `ssh_private_key_file` must be specified
+  with this.

-- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP
-  if available.
+- `ssh_private_ip` (boolean) - If true, then SSH will always use the private
+  IP if available.

-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
-  "subnet-12345def", where Packer will launch the EC2 instance. This field is
-  required if you are using an non-default VPC.
+- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
+  "subnet-12345def", where Packer will launch the EC2 instance. This field is
+  required if you are using a non-default VPC.

-- `tags` (object of key/value strings) - Tags applied to the AMI and
-  relevant snapshots.
+- `tags` (object of key/value strings) - Tags applied to the AMI and
+  relevant snapshots.

-- `temporary_key_pair_name` (string) - The name of the temporary keypair
-  to generate. By default, Packer generates a name with a UUID.
+- `temporary_key_pair_name` (string) - The name of the temporary keypair
+  to generate. By default, Packer generates a name with a UUID.

-- `token` (string) - The access token to use. This is different from the access
-  key and secret key. If you're not sure what this is, then you probably don't
-  need it. This will also be read from the `AWS_SECURITY_TOKEN`
-  environmental variable.
+- `token` (string) - The access token to use. This is different from the
+  access key and secret key. If you're not sure what this is, then you
+  probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN`
+  environmental variable.

-- `user_data` (string) - User data to apply when launching the instance. Note
-  that you need to be careful about escaping characters due to the templates
-  being JSON. It is often more convenient to use `user_data_file`, instead.
+- `user_data` (string) - User data to apply when launching the instance. Note
+  that you need to be careful about escaping characters due to the templates
+  being JSON. It is often more convenient to use `user_data_file`, instead.

-- `user_data_file` (string) - Path to a file that will be used for the user data
-  when launching the instance.
+- `user_data_file` (string) - Path to a file that will be used for the user
+  data when launching the instance.
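As a rough sketch of how several of the optional settings above combine in an `amazon-ebs` builder section; all values here are illustrative placeholders:

```json
{
  "spot_price": "auto",
  "spot_price_auto_product": "Linux/UNIX",
  "subnet_id": "subnet-12345def",
  "user_data_file": "scripts/user-data.sh",
  "tags": {
    "Name": "packer-example"
  }
}
```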
-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in - order to create a temporary security group within the VPC. +- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID + in order to create a temporary security group within the VPC. -- `windows_password_timeout` (string) - The timeout for waiting for a Windows - password for Windows instances. Defaults to 20 minutes. Example value: "10m" +- `windows_password_timeout` (string) - The timeout for waiting for a Windows + password for Windows instances. Defaults to 20 minutes. Example value: "10m" ## Basic Example diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 5ff36ccf2..3ba627680 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -45,196 +45,199 @@ builder. ### Required: -- `access_key` (string) - The access key used to communicate with AWS. If not - specified, Packer will use the key from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_ACCESS_KEY_ID` or - `AWS_ACCESS_KEY` (in that order), if set. +- `access_key` (string) - The access key used to communicate with AWS. If not + specified, Packer will use the key from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_ACCESS_KEY_ID` or + `AWS_ACCESS_KEY` (in that order), if set. -- `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is *not the same* as the access key. You can find your account - ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -- `ami_name` (string) - The name of the resulting AMI that will appear when - managing AMIs in the AWS console or via APIs. This must be unique. To help - make this unique, use a function like `timestamp` (see [configuration - templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -- `instance_type` (string) - The EC2 instance type to use while building the - AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -- `region` (string) - The name of the region, such as "us-east-1", in which to - launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This - bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -- `secret_key` (string) - The secret key used to communicate with AWS. 
If not - specified, Packer will use the secret from any - [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or - `AWS_SECRET_KEY` (in that order), if set. +- `secret_key` (string) - The secret key used to communicate with AWS. If not + specified, Packer will use the secret from any + [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + file or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or + `AWS_SECRET_KEY` (in that order), if set. -- `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -- `ssh_username` (string) - The username to use in order to communicate over SSH - to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. -- `x509_cert_path` (string) - The local path to a valid X509 certificate for - your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page in the - AWS console. +- `x509_cert_path` (string) - The local path to a valid X509 certificate for + your AWS account. This is used for bundling the AMI. This X509 certificate + must be registered with your account from the security credentials page in + the AWS console. -- `x509_key_path` (string) - The local path to the private key for the X509 - certificate specified by `x509_cert_path`. This is used for bundling the AMI. +- `x509_key_path` (string) - The local path to the private key for the X509 + certificate specified by `x509_cert_path`. This is used for bundling + the AMI. ### Optional: -- `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: +- `ami_block_device_mappings` (array of block device mappings) - Add the block + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) - The device name exposed to the instance (for +- `device_name` (string) - The device name exposed to the instance (for example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) - The virtual device name. See the documentation on +- `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) for more information - - `snapshot_id` (string) - The ID of the snapshot - - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) +- `snapshot_id` (string) - The ID of the snapshot +- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - - `volume_size` (integer) - The size of the volume, in GiB. Required if not +- `volume_size` (integer) - The size of the volume, in GiB. 
Required if not
   specifying a `snapshot_id`
-  - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
+- `delete_on_termination` (boolean) - Indicates whether the EBS volume is
   deleted on instance termination
-  - `encrypted` (boolean) - Indicates whether to encrypt the volume or not
-  - `no_device` (boolean) - Suppresses the specified device included in the
+- `encrypted` (boolean) - Indicates whether to encrypt the volume or not
+- `no_device` (boolean) - Suppresses the specified device included in the
   block device mapping of the AMI
-  - `iops` (integer) - The number of I/O operations per second (IOPS) that the
+- `iops` (integer) - The number of I/O operations per second (IOPS) that the
   volume supports. See the documentation on
   [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
   for more information

-- `ami_description` (string) - The description to set for the resulting AMI(s).
-  By default this description is empty.
+- `ami_description` (string) - The description to set for the
+  resulting AMI(s). By default this description is empty.

-- `ami_groups` (array of strings) - A list of groups that have access to launch
-  the resulting AMI(s). By default no groups have permission to launch the AMI.
-  `all` will make the AMI publicly accessible. AWS currently doesn't accept any
-  value other than "all".
+- `ami_groups` (array of strings) - A list of groups that have access to
+  launch the resulting AMI(s). By default no groups have permission to launch
+  the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
+  accept any value other than "all".

-- `ami_product_codes` (array of strings) - A list of product codes to associate
-  with the AMI. By default no product codes are associated with the AMI.
+- `ami_product_codes` (array of strings) - A list of product codes to
+  associate with the AMI. By default no product codes are associated with
+  the AMI.

-- `ami_regions` (array of strings) - A list of regions to copy the AMI to. Tags
-  and attributes are copied along with the AMI. AMI copying takes time depending
-  on the size of the AMI, but will generally take many minutes.
+- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
+  Tags and attributes are copied along with the AMI. AMI copying takes time
+  depending on the size of the AMI, but will generally take many minutes.

-- `ami_users` (array of strings) - A list of account IDs that have access to
-  launch the resulting AMI(s). By default no additional users other than the
-  user creating the AMI has permissions to launch it.
+- `ami_users` (array of strings) - A list of account IDs that have access to
+  launch the resulting AMI(s). By default no additional users other than the
+  user creating the AMI have permissions to launch it.

-- `ami_virtualization_type` (string) - The type of virtualization for the AMI
-  you are building. This option is required to register HVM images. Can be
-  "paravirtual" (default) or "hvm".
+- `ami_virtualization_type` (string) - The type of virtualization for the AMI
+  you are building. This option is required to register HVM images. Can be
+  "paravirtual" (default) or "hvm".

-- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
-  IP addresses are not provided by default.
If this is toggled, your new
-  instance will get a Public IP.
+- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
+  IP addresses are not provided by default. If this is toggled, your new
+  instance will get a Public IP.

-- `availability_zone` (string) - Destination availability zone to launch
-  instance in. Leave this empty to allow Amazon to auto-assign.
+- `availability_zone` (string) - Destination availability zone to launch
+  instance in. Leave this empty to allow Amazon to auto-assign.

-- `bundle_destination` (string) - The directory on the running instance where
-  the bundled AMI will be saved prior to uploading. By default this is "/tmp".
-  This directory must exist and be writable.
+- `bundle_destination` (string) - The directory on the running instance where
+  the bundled AMI will be saved prior to uploading. By default this is "/tmp".
+  This directory must exist and be writable.

-- `bundle_prefix` (string) - The prefix for files created from bundling the
-  root volume. By default this is "image-{{timestamp}}". The `timestamp`
-  variable should be used to make sure this is unique, otherwise it can collide
-  with other created AMIs by Packer in your account.
+- `bundle_prefix` (string) - The prefix for files created from bundling the
+  root volume. By default this is "image-{{timestamp}}". The `timestamp`
+  variable should be used to make sure this is unique, otherwise it can
+  collide with other AMIs created by Packer in your account.

-- `bundle_upload_command` (string) - The command to use to upload the
-  bundled volume. See the "custom bundle commands" section below for
-  more information.
+- `bundle_upload_command` (string) - The command to use to upload the
+  bundled volume. See the "custom bundle commands" section below for
+  more information.

-- `bundle_vol_command` (string) - The command to use to bundle the volume. See
-  the "custom bundle commands" section below for more information.
+- `bundle_vol_command` (string) - The command to use to bundle the volume. See
+  the "custom bundle commands" section below for more information.

-- `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport)
-  on HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS
-  IAM policy.
+- `enhanced_networking` (boolean) - Enable enhanced
+  networking (SriovNetSupport) on HVM-compatible AMIs. If true, add
+  `ec2:ModifyInstanceAttribute` to your AWS IAM policy.

-- `force_deregister` (boolean) - Force Packer to first deregister an existing
-  AMI if one with the same name already exists. Default `false`.
+- `force_deregister` (boolean) - Force Packer to first deregister an existing
+  AMI if one with the same name already exists. Default `false`.

-- `iam_instance_profile` (string) - The name of an [IAM instance
-  profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
-  to launch the EC2 instance with.
+- `iam_instance_profile` (string) - The name of an [IAM instance
+  profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
+  to launch the EC2 instance with.

-- `launch_block_device_mappings` (array of block device mappings) - Add the
-  block device mappings to the launch instance. The block device mappings are
-  the same as `ami_block_device_mappings` above.
+- `launch_block_device_mappings` (array of block device mappings) - Add the
+  block device mappings to the launch instance. The block device mappings are
+  the same as `ami_block_device_mappings` above.

-- `run_tags` (object of key/value strings) - Tags to apply to the instance that
-  is *launched* to create the AMI. These tags are *not* applied to the resulting
-  AMI unless they're duplicated in `tags`.
+- `run_tags` (object of key/value strings) - Tags to apply to the instance
+  that is *launched* to create the AMI. These tags are *not* applied to the
+  resulting AMI unless they're duplicated in `tags`.

-- `security_group_id` (string) - The ID (*not* the name) of the security group
-  to assign to the instance. By default this is not set and Packer will
-  automatically create a new temporary security group to allow SSH access. Note
-  that if this is specified, you must be sure the security group allows access
-  to the `ssh_port` given below.
+- `security_group_id` (string) - The ID (*not* the name) of the security group
+  to assign to the instance. By default this is not set and Packer will
+  automatically create a new temporary security group to allow SSH access.
+  Note that if this is specified, you must be sure the security group allows
+  access to the `ssh_port` given below.

-- `security_group_ids` (array of strings) - A list of security groups as
-  described above. Note that if this is specified, you must omit the
-  `security_group_id`.
+- `security_group_ids` (array of strings) - A list of security groups as
+  described above. Note that if this is specified, you must omit the
+  `security_group_id`.

-- `spot_price` (string) - The maximum hourly price to launch a spot instance to
-  create the AMI. It is a type of instances that EC2 starts when the maximum
-  price that you specify exceeds the current spot price. Spot price will be
-  updated based on available spot instance capacity and current spot
-  Instance requests. It may save you some costs. You can set this to "auto" for
-  Packer to automatically discover the best spot price.
+- `spot_price` (string) - The maximum hourly price to launch a spot instance
+  to create the AMI. Spot instances are a type of instance that EC2 starts
+  when the maximum price that you specify exceeds the current spot price.
+  Spot price will be updated based on available spot instance capacity and
+  current spot instance requests. It may reduce your costs. You can set this
+  to "auto" for Packer to automatically discover the best spot price.

-- `spot_price_auto_product` (string) - Required if `spot_price` is set
-  to "auto". This tells Packer what sort of AMI you're launching to find the
-  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
-  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
+- `spot_price_auto_product` (string) - Required if `spot_price` is set
+  to "auto". This tells Packer what sort of AMI you're launching to find the
+  best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
+  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

-- `ssh_keypair_name` (string) - If specified, this is the key that will be used
-  for SSH with the machine. By default, this is blank, and Packer will generate
-  a temporary keypair. `ssh_private_key_file` must be specified with this.
+- `ssh_keypair_name` (string) - If specified, this is the key that will be
+  used for SSH with the machine. By default, this is blank, and Packer will
+  generate a temporary keypair. `ssh_private_key_file` must be specified
+  with this.

-- `ssh_private_ip` (boolean) - If true, then SSH will always use the private IP
-  if available.
+- `ssh_private_ip` (boolean) - If true, then SSH will always use the private
+  IP if available.

-- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
-  "subnet-12345def", where Packer will launch the EC2 instance.
This field is
-  required if you are using an non-default VPC.
+- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
+  "subnet-12345def", where Packer will launch the EC2 instance. This field is
+  required if you are using a non-default VPC.

-- `tags` (object of key/value strings) - Tags applied to the AMI.
+- `tags` (object of key/value strings) - Tags applied to the AMI.

-- `temporary_key_pair_name` (string) - The name of the temporary keypair
-  to generate. By default, Packer generates a name with a UUID.
+- `temporary_key_pair_name` (string) - The name of the temporary keypair
+  to generate. By default, Packer generates a name with a UUID.

-- `user_data` (string) - User data to apply when launching the instance. Note
-  that you need to be careful about escaping characters due to the templates
-  being JSON. It is often more convenient to use `user_data_file`, instead.
+- `user_data` (string) - User data to apply when launching the instance. Note
+  that you need to be careful about escaping characters due to the templates
+  being JSON. It is often more convenient to use `user_data_file`, instead.

-- `user_data_file` (string) - Path to a file that will be used for the user data
-  when launching the instance.
+- `user_data_file` (string) - Path to a file that will be used for the user
+  data when launching the instance.

-- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID in
-  order to create a temporary security group within the VPC.
+- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
+  in order to create a temporary security group within the VPC.

-- `x509_upload_path` (string) - The path on the remote machine where the X509
-  certificate will be uploaded. This path must already exist and be writable.
-  X509 certificates are uploaded after provisioning is run, so it is perfectly
-  okay to create this directory as part of the provisioning process.
+- `x509_upload_path` (string) - The path on the remote machine where the X509
+  certificate will be uploaded. This path must already exist and be writable.
+  X509 certificates are uploaded after provisioning is run, so it is perfectly
+  okay to create this directory as part of the provisioning process.

-- `windows_password_timeout` (string) - The timeout for waiting for a Windows
-  password for Windows instances. Defaults to 20 minutes. Example value: "10m"
+- `windows_password_timeout` (string) - The timeout for waiting for a Windows
+  password for Windows instances. Defaults to 20 minutes. Example value: "10m"

 ## Basic Example

diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown
index 69b4e509b..b96bfba32 100644
--- a/website/source/docs/builders/amazon.html.markdown
+++ b/website/source/docs/builders/amazon.html.markdown
@@ -12,20 +12,21 @@ Packer is able to create Amazon AMIs. To achieve this, Packer comes with
 multiple builders depending on the strategy you want to use to build the AMI.
 Packer supports the following builders at the moment:

-- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by
-  launching a source AMI and re-packaging it into a new AMI after provisioning.
-  If in doubt, use this builder, which is the easiest to get started with.
+- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by
+  launching a source AMI and re-packaging it into a new AMI
+  after provisioning. If in doubt, use this builder, which is the easiest to
+  get started with.
-- [amazon-instance](/docs/builders/amazon-instance.html) - Create instance-store
-  AMIs by launching and provisioning a source instance, then rebundling it and
-  uploading it to S3.
+- [amazon-instance](/docs/builders/amazon-instance.html) - Create
+  instance-store AMIs by launching and provisioning a source instance, then
+  rebundling it and uploading it to S3.

-- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs
-  from an existing EC2 instance by mounting the root device and using a
-  [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision
-  that device. This is an **advanced builder and should not be used by
-  newcomers**. However, it is also the fastest way to build an EBS-backed AMI
-  since no new EC2 instance needs to be launched.
+- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs
+  from an existing EC2 instance by mounting the root device and using a
+  [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision
+  that device. This is an **advanced builder and should not be used by
+  newcomers**. However, it is also the fastest way to build an EBS-backed AMI
+  since no new EC2 instance needs to be launched.

 -> **Don't know which builder to use?** If in doubt, use the [amazon-ebs
 builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon

diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown
index b20523944..b5657ce9d 100644
--- a/website/source/docs/builders/digitalocean.html.markdown
+++ b/website/source/docs/builders/digitalocean.html.markdown
@@ -34,41 +34,43 @@ builder.

 ### Required:

-- `api_token` (string) - The client TOKEN to use to access your account. It can
-  also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set.
+- `api_token` (string) - The client TOKEN to use to access your account. It
+  can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`,
+  if set.

-- `image` (string) - The name (or slug) of the base image to use. This is the
-  image that will be used to launch a new droplet and provision it. See
-  https://developers.digitalocean.com/documentation/v2/\#list-all-images for
-  details on how to get a list of the the accepted image names/slugs.
+- `image` (string) - The name (or slug) of the base image to use. This is the
+  image that will be used to launch a new droplet and provision it. See
+  https://developers.digitalocean.com/documentation/v2/\#list-all-images for
+  details on how to get a list of the accepted image names/slugs.

-- `region` (string) - The name (or slug) of the region to launch the droplet in.
-  Consequently, this is the region where the snapshot will be available. See
-  https://developers.digitalocean.com/documentation/v2/\#list-all-regions for
-  the accepted region names/slugs.
+- `region` (string) - The name (or slug) of the region to launch the
+  droplet in. Consequently, this is the region where the snapshot will
+  be available. See
+  https://developers.digitalocean.com/documentation/v2/\#list-all-regions for
+  the accepted region names/slugs.

-- `size` (string) - The name (or slug) of the droplet size to use. See
-  https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for the
-  accepted size names/slugs.
+- `size` (string) - The name (or slug) of the droplet size to use. See
+  https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for
+  the accepted size names/slugs.
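Putting the four required keys together, a minimal `digitalocean` builder section might look like the following sketch; the token is a placeholder, and the image, region, and size slugs are examples only (consult the API listings above for current values):

```json
{
  "type": "digitalocean",
  "api_token": "YOUR API TOKEN",
  "image": "ubuntu-14-04-x64",
  "region": "nyc3",
  "size": "512mb"
}
```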
### Optional:

-- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean sets
-  the hostname of the machine to this value.
+- `droplet_name` (string) - The name assigned to the droplet. DigitalOcean
+  sets the hostname of the machine to this value.

-- `private_networking` (boolean) - Set to `true` to enable private networking
-  for the droplet being created. This defaults to `false`, or not enabled.
+- `private_networking` (boolean) - Set to `true` to enable private networking
+  for the droplet being created. This defaults to `false`, or not enabled.

-- `snapshot_name` (string) - The name of the resulting snapshot that will appear
-  in your account. This must be unique. To help make this unique, use a function
-  like `timestamp` (see [configuration
-  templates](/docs/templates/configuration-templates.html) for more info)
+- `snapshot_name` (string) - The name of the resulting snapshot that will
+  appear in your account. This must be unique. To help make this unique, use a
+  function like `timestamp` (see [configuration
+  templates](/docs/templates/configuration-templates.html) for more info)

-- `state_timeout` (string) - The time to wait, as a duration string, for a
-  droplet to enter a desired state (such as "active") before timing out. The
-  default state timeout is "6m".
+- `state_timeout` (string) - The time to wait, as a duration string, for a
+  droplet to enter a desired state (such as "active") before timing out. The
+  default state timeout is "6m".

-- `user_data` (string) - User data to launch with the Droplet.
+- `user_data` (string) - User data to launch with the Droplet.

 ## Basic Example

diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown
index b2fab5b19..76b1d4057 100644
--- a/website/source/docs/builders/docker.html.markdown
+++ b/website/source/docs/builders/docker.html.markdown
@@ -68,42 +68,42 @@ builder.

 ### Required:

-- `commit` (boolean) - If true, the container will be committed to an image
-  rather than exported. This cannot be set if `export_path` is set.
+- `commit` (boolean) - If true, the container will be committed to an image
+  rather than exported. This cannot be set if `export_path` is set.

-- `export_path` (string) - The path where the final container will be exported
-  as a tar file. This cannot be set if `commit` is set to true.
+- `export_path` (string) - The path where the final container will be exported
+  as a tar file. This cannot be set if `commit` is set to true.

-- `image` (string) - The base image for the Docker container that will
-  be started. This image will be pulled from the Docker registry if it doesn't
-  already exist.
+- `image` (string) - The base image for the Docker container that will
+  be started. This image will be pulled from the Docker registry if it doesn't
+  already exist.

 ### Optional:

-- `login` (boolean) - Defaults to false. If true, the builder will login in
-  order to pull the image. The builder only logs in for the duration of
-  the pull. It always logs out afterwards.
+- `login` (boolean) - Defaults to false. If true, the builder will log in to
+  pull the image. The builder only logs in for the duration of
+  the pull. It always logs out afterwards.

-- `login_email` (string) - The email to use to authenticate to login.
+- `login_email` (string) - The email to use to authenticate when logging in.

-- `login_username` (string) - The username to use to authenticate to login.
+- `login_username` (string) - The username to use to authenticate when
+  logging in.
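A hedged sketch tying the required Docker keys and the `login` flags together; the image, credentials, and server address below are placeholders:

```json
{
  "type": "docker",
  "image": "ubuntu",
  "commit": true,
  "login": true,
  "login_username": "my-user",
  "login_password": "MY PASSWORD",
  "login_server": "https://index.docker.io/v1/"
}
```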
-- `login_password` (string) - The password to use to authenticate to login.
+- `login_password` (string) - The password to use to authenticate when
+  logging in.

-- `login_server` (string) - The server address to login to.
+- `login_server` (string) - The server address to log in to.

-- `pull` (boolean) - If true, the configured image will be pulled using
-  `docker pull` prior to use. Otherwise, it is assumed the image already exists
-  and can be used. This defaults to true if not set.
+- `pull` (boolean) - If true, the configured image will be pulled using
+  `docker pull` prior to use. Otherwise, it is assumed the image already
+  exists and can be used. This defaults to true if not set.

-- `run_command` (array of strings) - An array of arguments to pass to
-  `docker run` in order to run the container. By default this is set to
-  `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a
-  couple template variables to customize, as well.
+- `run_command` (array of strings) - An array of arguments to pass to
+  `docker run` in order to run the container. By default this is set to
+  `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a
+  couple of template variables to customize, as well.

-- `volumes` (map of strings to strings) - A mapping of additional volumes to
-  mount into this container. The key of the object is the host path, the value
-  is the container path.
+- `volumes` (map of strings to strings) - A mapping of additional volumes to
+  mount into this container. The key of the object is the host path, the value
+  is the container path.

 ## Using the Artifact: Export

@@ -226,11 +226,11 @@ Dockerfiles have some additional features that Packer doesn't support which are
 able to be worked around. Many of these features will be automated by Packer in
 the future:

-- Dockerfiles will snapshot the container at each step, allowing you to go back
-  to any step in the history of building. Packer doesn't do this yet, but
-  inter-step snapshotting is on the way.
+- Dockerfiles will snapshot the container at each step, allowing you to go
+  back to any step in the history of building. Packer doesn't do this yet, but
+  inter-step snapshotting is on the way.

-- Dockerfiles can contain information such as exposed ports, shared volumes, and
-  other metadata. Packer builds a raw Docker container image that has none of
-  this metadata. You can pass in much of this metadata at runtime with
-  `docker run`.
+- Dockerfiles can contain information such as exposed ports, shared volumes,
+  and other metadata. Packer builds a raw Docker container image that has none
+  of this metadata. You can pass in much of this metadata at runtime with
+  `docker run`.

diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown
index 409275c7b..01eb3c7e1 100644
--- a/website/source/docs/builders/openstack.html.markdown
+++ b/website/source/docs/builders/openstack.html.markdown
@@ -38,67 +38,67 @@ builder.

 ### Required:

-- `flavor` (string) - The ID, name, or full URL for the desired flavor for the
-  server to be created.
+- `flavor` (string) - The ID, name, or full URL for the desired flavor for the
+  server to be created.

-- `image_name` (string) - The name of the resulting image.
+- `image_name` (string) - The name of the resulting image.

-- `source_image` (string) - The ID or full URL to the base image to use. This is
-  the image that will be used to launch a new server and provision it.
Unless
-  you specify completely custom SSH settings, the source image must have
-  `cloud-init` installed so that the keypair gets assigned properly.
+- `source_image` (string) - The ID or full URL to the base image to use. This
+  is the image that will be used to launch a new server and provision it.
+  Unless you specify completely custom SSH settings, the source image must
+  have `cloud-init` installed so that the keypair gets assigned properly.

-- `username` (string) - The username used to connect to the OpenStack service.
-  If not specified, Packer will use the environment variable `OS_USERNAME`,
-  if set.
+- `username` (string) - The username used to connect to the OpenStack service.
+  If not specified, Packer will use the environment variable `OS_USERNAME`,
+  if set.

-- `password` (string) - The password used to connect to the OpenStack service.
-  If not specified, Packer will use the environment variables `OS_PASSWORD`,
-  if set.
+- `password` (string) - The password used to connect to the OpenStack service.
+  If not specified, Packer will use the environment variable `OS_PASSWORD`,
+  if set.

 ### Optional:

-- `api_key` (string) - The API key used to access OpenStack. Some OpenStack
-  installations require this.
+- `api_key` (string) - The API key used to access OpenStack. Some OpenStack
+  installations require this.

-- `availability_zone` (string) - The availability zone to launch the server in.
-  If this isn't specified, the default enforced by your OpenStack cluster will
-  be used. This may be required for some OpenStack clusters.
+- `availability_zone` (string) - The availability zone to launch the
+  server in. If this isn't specified, the default enforced by your OpenStack
+  cluster will be used. This may be required for some OpenStack clusters.

-- `floating_ip` (string) - A specific floating IP to assign to this instance.
-  `use_floating_ip` must also be set to true for this to have an affect.
+- `floating_ip` (string) - A specific floating IP to assign to this instance.
+  `use_floating_ip` must also be set to true for this to have an effect.

-- `floating_ip_pool` (string) - The name of the floating IP pool to use to
-  allocate a floating IP. `use_floating_ip` must also be set to true for this to
-  have an affect.
+- `floating_ip_pool` (string) - The name of the floating IP pool to use to
+  allocate a floating IP. `use_floating_ip` must also be set to true for this
+  to have an effect.

-- `insecure` (boolean) - Whether or not the connection to OpenStack can be done
-  over an insecure connection. By default this is false.
+- `insecure` (boolean) - Whether or not the connection to OpenStack can be
+  done over an insecure connection. By default this is false.

-- `networks` (array of strings) - A list of networks by UUID to attach to
-  this instance.
+- `networks` (array of strings) - A list of networks by UUID to attach to
+  this instance.

-- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
-  instance into. Some OpenStack installations require this. If not specified,
-  Packer will use the environment variable `OS_TENANT_NAME`, if set.
+- `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
+  instance into. Some OpenStack installations require this. If not specified,
+  Packer will use the environment variable `OS_TENANT_NAME`, if set.

-- `security_groups` (array of strings) - A list of security groups by name to
-  add to this instance.
-- `region` (string) - The name of the region, such as "DFW", in which to launch - the server to create the AMI. If not specified, Packer will use the - environment variable `OS_REGION_NAME`, if set. +- `region` (string) - The name of the region, such as "DFW", in which to + launch the server to create the AMI. If not specified, Packer will use the + environment variable `OS_REGION_NAME`, if set. -- `ssh_interface` (string) - The type of interface to connect via SSH. Values - useful for Rackspace are "public" or "private", and the default behavior is to - connect via whichever is returned first from the OpenStack API. +- `ssh_interface` (string) - The type of interface to connect via SSH. Values + useful for Rackspace are "public" or "private", and the default behavior is + to connect via whichever is returned first from the OpenStack API. -- `use_floating_ip` (boolean) - Whether or not to use a floating IP for - the instance. Defaults to false. +- `use_floating_ip` (boolean) - Whether or not to use a floating IP for + the instance. Defaults to false. -- `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for - Rackconnect to assign the machine an IP address before connecting via SSH. - Defaults to false. +- `rackconnect_wait` (boolean) - For rackspace, whether or not to wait for + Rackconnect to assign the machine an IP address before connecting via SSH. + Defaults to false. ## Basic Example: Rackspace public cloud @@ -138,7 +138,7 @@ appear in the template. That is because I source a standard OpenStack script with environment variables set before I run this. This script is setting environment variables like: -- `OS_AUTH_URL` -- `OS_TENANT_ID` -- `OS_USERNAME` -- `OS_PASSWORD` +- `OS_AUTH_URL` +- `OS_TENANT_ID` +- `OS_USERNAME` +- `OS_PASSWORD` diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index d89b5394f..76278ec2b 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -56,146 +56,149 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. -- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or - "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files are generally large and corruption does happen - from time to time. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or + "sha512" currently. While "none" will skip checksumming, this is not + recommended since ISO files are generally large and corruption does happen + from time to time. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). If this is an - HTTP URL, Packer will download it and cache it between runs. 
+- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). If + this is an HTTP URL, Packer will download it and cache it between runs. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. -- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to - install into the VM. Valid values are "win", "lin", "mac", "os2" and "other". - This can be omitted only if `parallels_tools_mode` is "disable". +- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to + install into the VM. Valid values are "win", "lin", "mac", "os2" + and "other". This can be omitted only if `parallels_tools_mode` + is "disable". ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for - the VM. By default, this is 40000 (about 40 GB). +- `disk_size` (integer) - The size, in megabytes, of the hard disk to create + for the VM. By default, this is 40000 (about 40 GB). -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. 
By default, no floppy will be attached. All files listed in
+  this setting get placed into the root directory of the floppy and the floppy
+  is attached as the first floppy device. Currently, no support exists for
+  creating sub-directories on the floppy. Wildcard characters (\*, ?,
+  and \[\]) are allowed. Directory names are also allowed, which will add all
+  the files found in the directory to the floppy.

-- `guest_os_type` (string) - The guest OS type being installed. By default this
-  is "other", but you can get *dramatic* performance improvements by setting
-  this to the proper value. To view all available values for this run
-  `prlctl create x --distribution list`. Setting the correct value hints to
-  Parallels Desktop how to optimize the virtual hardware to work best with that
-  operating system.
+- `guest_os_type` (string) - The guest OS type being installed. By default
+  this is "other", but you can get *dramatic* performance improvements by
+  setting this to the proper value. To view all available values for this run
+  `prlctl create x --distribution list`. Setting the correct value hints to
+  Parallels Desktop how to optimize the virtual hardware to work best with
+  that operating system.

-- `hard_drive_interface` (string) - The type of controller that the hard drives
-  are attached to, defaults to "sata". Valid options are "sata", "ide",
-  and "scsi".
+- `hard_drive_interface` (string) - The type of controller that the hard
+  drives are attached to, defaults to "sata". Valid options are "sata", "ide",
+  and "scsi".

-- `host_interfaces` (array of strings) - A list of which interfaces on the host
-  should be searched for a IP address. The first IP address found on one of
-  these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to
-  \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
-  "ppp0", "ppp1", "ppp2"\].
+- `host_interfaces` (array of strings) - A list of which interfaces on the
+  host should be searched for an IP address. The first IP address found on one
+  of these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to
+  \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
+  "ppp0", "ppp1", "ppp2"\].

-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
-  The files in this directory will be available over HTTP that will be
-  requestable from the virtual machine. This is useful for hosting kickstart
-  files and so on. By default this is "", which means no HTTP server will
-  be started. The address and port of the HTTP server will be available as
-  variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+  HTTP server. The files in this directory will be available over HTTP and
+  will be requestable from the virtual machine. This is useful for hosting
+  kickstart files and so on. By default this is "", which means no HTTP server
+  will be started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.

-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
-  maximum port to use for the HTTP server started to serve the `http_directory`.
-  Because Packer often runs in parallel, Packer will choose a randomly available
-  port in this range to run the HTTP server. If you want to force the HTTP
-  server to be on one port, make this minimum and maximum port the same.
- default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+ maximum port to use for the HTTP server started to serve the
+ `http_directory`. Because Packer often runs in parallel, Packer will choose
+ a randomly available port in this range to run the HTTP server. If you want
+ to force the HTTP server to be on one port, make this minimum and maximum
+ port the same. By default the values are 8000 and 9000, respectively.
-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer
- will try these in order. If anything goes wrong attempting to download or
- while downloading a single URL, it will move on to the next. All URLs must
- point to the same file (same checksum). By default this is empty and `iso_url`
- is used. Only one of `iso_url` or `iso_urls` can be specified.
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
+ Packer will try these in order. If anything goes wrong attempting to
+ download or while downloading a single URL, it will move on to the next. All
+ URLs must point to the same file (same checksum). By default this is empty
+ and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
-- `output_directory` (string) - This is the path to the directory where the
- resulting virtual machine will be created. This may be relative or absolute.
- If relative, the path is relative to the working directory when `packer`
- is executed. This directory must not exist or be empty prior to running
- the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
- name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when `packer`
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.
-- `parallels_tools_guest_path` (string) - The path in the virtual machine to
- upload Parallels Tools. This only takes effect if `parallels_tools_mode`
- is "upload". This is a [configuration
- template](/docs/templates/configuration-templates.html) that has a single
- valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`.
- By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the
- login directory of the user.
+- `parallels_tools_guest_path` (string) - The path in the virtual machine to
+ upload Parallels Tools. This only takes effect if `parallels_tools_mode`
+ is "upload". This is a [configuration
+ template](/docs/templates/configuration-templates.html) that has a single
+ valid variable: `Flavor`, which will be the value of
+ `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso"
+ which should upload into the login directory of the user.
-- `parallels_tools_mode` (string) - The method by which Parallels Tools are made
- available to the guest for installation. Valid options are "upload", "attach",
- or "disable". If the mode is "attach" the Parallels Tools ISO will be attached
- as a CD device to the virtual machine. If the mode is "upload" the Parallels
- Tools ISO will be uploaded to the path specified by
- `parallels_tools_guest_path`. The default value is "upload".
+- `parallels_tools_mode` (string) - The method by which Parallels Tools are
+ made available to the guest for installation. Valid options are "upload",
+ "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
+ be attached as a CD device to the virtual machine. If the mode is "upload"
+ the Parallels Tools ISO will be uploaded to the path specified by
+ `parallels_tools_guest_path`. The default value is "upload".
-- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in
- order to further customize the virtual machine being created. The value of
- this is an array of commands to execute. The commands are executed in the
- order defined in the template. For each command, the command is defined itself
- as an array of strings, where each string represents a single argument on the
- command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated
- as a [configuration template](/docs/templates/configuration-templates.html),
- where the `Name` variable is replaced with the VM name. More details on how to
- use `prlctl` are below.
+- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute
+ in order to further customize the virtual machine being created. The value
+ of this is an array of commands to execute. The commands are executed in the
+ order defined in the template. For each command, the command is defined
+ itself as an array of strings, where each string represents a single
+ argument on the command-line to `prlctl` (but excluding `prlctl` itself).
+ Each arg is treated as a [configuration
+ template](/docs/templates/configuration-templates.html), where the `Name`
+ variable is replaced with the VM name. More details on how to use `prlctl`
+ are below.
-- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that
- it is run after the virtual machine is shutdown, and before the virtual
- machine is exported.
+- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except
+ that it is run after the virtual machine is shut down, and before the
+ virtual machine is exported.
-- `prlctl_version_file` (string) - The path within the virtual machine to upload
- a file that contains the `prlctl` version that was used to create the machine.
- This information can be useful for provisioning. By default this is
- ".prlctl\_version", which will generally upload it into the home directory.
+- `prlctl_version_file` (string) - The path within the virtual machine to
+ upload a file that contains the `prlctl` version that was used to create
+ the machine. This information can be useful for provisioning. By default
+ this is ".prlctl\_version", which will generally upload it into the
+ home directory.
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty string,
- which tells Packer to just forcefully shut down the machine.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine.
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ `shutdown_command` for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.
-- `vm_name` (string) - This is the name of the PVM directory for the new virtual
- machine, without the file extension. By default this is "packer-BUILDNAME",
- where "BUILDNAME" is the name of the build.
+- `vm_name` (string) - This is the name of the PVM directory for the new
+ virtual machine, without the file extension. By default this is
+ "packer-BUILDNAME", where "BUILDNAME" is the name of the build.

## Boot Command

@@ -214,40 +217,40 @@ simulating a human actually typing the keyboard.
There are a set of special keys available. If these are in your boot command,
they will be replaced by the proper key:
-- `<bs>` - Backspace
+- `<bs>` - Backspace
-- `<del>` - Delete
+- `<del>` - Delete
-- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
-- `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
-- `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
-- `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
-- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
-- `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
-- `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
-- `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
-- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
-- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
- any additional keys. This is useful if you have to generally wait for the UI
- to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
+ sending any additional keys. This is useful if you have to generally wait
+ for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:
-- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
- is started serving the directory specified by the `http_directory`
- configuration parameter. If `http_directory` isn't specified, these will be
- blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
+ that is started serving the directory specified by the `http_directory`
+ configuration parameter. If `http_directory` isn't specified, these will be
+ blank!

Example boot command. This is actually a working boot command used to start an
Ubuntu 12.04 installer:

diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown
index f4f9f352c..ce13f2c19 100644
--- a/website/source/docs/builders/parallels-pvm.html.markdown
+++ b/website/source/docs/builders/parallels-pvm.html.markdown
@@ -53,96 +53,99 @@ builder.

### Required:

-- `source_path` (string) - The path to a PVM directory that acts as the source
- of this build.
+- `source_path` (string) - The path to a PVM directory that acts as the source
+ of this build.
-- `ssh_username` (string) - The username to use to SSH into the machine once the
- OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once
+ the OS is installed.
-- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
- install into the VM. Valid values are "win", "lin", "mac", "os2" and "other".
- This can be omitted only if `parallels_tools_mode` is "disable".
+- `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
+ install into the VM. Valid values are "win", "lin", "mac", "os2"
+ and "other". This can be omitted only if `parallels_tools_mode`
+ is "disable".

### Optional:

-- `boot_command` (array of strings) - This is an array of commands to type when
- the virtual machine is first booted. The goal of these commands should be to
- type just enough to initialize the operating system installer. Special keys
- can be typed as well, and are covered in the section below on the
- boot command. If this is not specified, it is assumed the installer will
- start itself.
+- `boot_command` (array of strings) - This is an array of commands to type
+ when the virtual machine is first booted. The goal of these commands should
+ be to type just enough to initialize the operating system installer. Special
+ keys can be typed as well, and are covered in the section below on the
+ boot command. If this is not specified, it is assumed the installer will
+ start itself.
-- `boot_wait` (string) - The time to wait after booting the initial virtual
- machine before typing the `boot_command`. The value of this should be
- a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
- seconds and one minute 30 seconds, respectively. If this isn't specified, the
- default is 10 seconds.
+- `boot_wait` (string) - The time to wait after booting the initial virtual
+ machine before typing the `boot_command`. The value of this should be
+ a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
+ five seconds and one minute 30 seconds, respectively. If this isn't
+ specified, the default is 10 seconds.
-- `floppy_files` (array of strings) - A list of files to put onto a floppy disk
- that is attached when the VM is booted for the first time. This is most useful
- for unattended Windows installs, which look for an `Autounattend.xml` file on
- removable media. By default no floppy will be attached. The files listed in
- this configuration will all be put into the root directory of the floppy disk;
- sub-directories are not supported.
+- `floppy_files` (array of strings) - A list of files to put onto a floppy
+ disk that is attached when the VM is booted for the first time. This is most
+ useful for unattended Windows installs, which look for an `Autounattend.xml`
+ file on removable media. By default no floppy will be attached. The files
+ listed in this configuration will all be put into the root directory of the
+ floppy disk; sub-directories are not supported.
-- `reassign_mac` (boolean) - If this is "false" the MAC address of the first NIC
- will reused when imported else a new MAC address will be generated
- by Parallels. Defaults to "false".
+- `reassign_mac` (boolean) - If this is "false" the MAC address of the first
+ NIC will be reused when imported; otherwise a new MAC address will be
+ generated by Parallels. Defaults to "false".
-- `output_directory` (string) - This is the path to the directory where the
- resulting virtual machine will be created. This may be relative or absolute.
- If relative, the path is relative to the working directory when `packer`
- is executed. This directory must not exist or be empty prior to running
- the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
- name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when `packer`
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.
-- `parallels_tools_guest_path` (string) - The path in the VM to upload
- Parallels Tools. This only takes effect if `parallels_tools_mode` is "upload".
- This is a [configuration
- template](/docs/templates/configuration-templates.html) that has a single
- valid variable: `Flavor`, which will be the value of `parallels_tools_flavor`.
- By default this is "prl-tools-{{.Flavor}}.iso" which should upload into the
- login directory of the user.
+- `parallels_tools_guest_path` (string) - The path in the VM to upload
+ Parallels Tools. This only takes effect if `parallels_tools_mode`
+ is "upload". This is a [configuration
+ template](/docs/templates/configuration-templates.html) that has a single
+ valid variable: `Flavor`, which will be the value of
+ `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso"
+ which should upload into the login directory of the user.
-- `parallels_tools_mode` (string) - The method by which Parallels Tools are made
- available to the guest for installation. Valid options are "upload", "attach",
- or "disable". If the mode is "attach" the Parallels Tools ISO will be attached
- as a CD device to the virtual machine. If the mode is "upload" the Parallels
- Tools ISO will be uploaded to the path specified by
- `parallels_tools_guest_path`. The default value is "upload".
+- `parallels_tools_mode` (string) - The method by which Parallels Tools are
+ made available to the guest for installation. Valid options are "upload",
+ "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
+ be attached as a CD device to the virtual machine. If the mode is "upload"
+ the Parallels Tools ISO will be uploaded to the path specified by
+ `parallels_tools_guest_path`. The default value is "upload".
-- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute in
- order to further customize the virtual machine being created. The value of
- this is an array of commands to execute. The commands are executed in the
- order defined in the template. For each command, the command is defined itself
- as an array of strings, where each string represents a single argument on the
- command-line to `prlctl` (but excluding `prlctl` itself). Each arg is treated
- as a [configuration template](/docs/templates/configuration-templates.html),
- where the `Name` variable is replaced with the VM name. More details on how to
- use `prlctl` are below.
+- `prlctl` (array of array of strings) - Custom `prlctl` commands to execute
+ in order to further customize the virtual machine being created. The value
+ of this is an array of commands to execute. The commands are executed in the
+ order defined in the template. For each command, the command is defined
+ itself as an array of strings, where each string represents a single
+ argument on the command-line to `prlctl` (but excluding `prlctl` itself).
+ Each arg is treated as a [configuration
+ template](/docs/templates/configuration-templates.html), where the `Name`
+ variable is replaced with the VM name. More details on how to use `prlctl`
+ are below.
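To make the array-of-arrays shape concrete, here is a sketch of a `prlctl`
section (the memory and CPU values are arbitrary illustrations, not taken
from this patch):

``` {.javascript}
{
  "prlctl": [
    ["set", "{{.Name}}", "--memsize", "1024"],
    ["set", "{{.Name}}", "--cpus", "2"]
  ]
}
```

Each inner array becomes one `prlctl` invocation, with `{{.Name}}` replaced
by the VM name before the command runs.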
-- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except that
- it is run after the virtual machine is shutdown, and before the virtual
- machine is exported.
+- `prlctl_post` (array of array of strings) - Identical to `prlctl`, except
+ that it is run after the virtual machine is shut down, and before the
+ virtual machine is exported.
-- `prlctl_version_file` (string) - The path within the virtual machine to upload
- a file that contains the `prlctl` version that was used to create the machine.
- This information can be useful for provisioning. By default this is
- ".prlctl\_version", which will generally upload it into the home directory.
+- `prlctl_version_file` (string) - The path within the virtual machine to
+ upload a file that contains the `prlctl` version that was used to create
+ the machine. This information can be useful for provisioning. By default
+ this is ".prlctl\_version", which will generally upload it into the
+ home directory.
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty string,
- which tells Packer to just forcefully shut down the machine.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine.
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ `shutdown_command` for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.
-- `vm_name` (string) - This is the name of the virtual machine when it is
- imported as well as the name of the PVM directory when the virtual machine
- is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
- name of the build.
+- `vm_name` (string) - This is the name of the virtual machine when it is
+ imported as well as the name of the PVM directory when the virtual machine
+ is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
+ name of the build.

## Parallels Tools

@@ -168,31 +171,31 @@ simulating a human actually typing the keyboard.
There are a set of special keys available. If these are in your boot command,
they will be replaced by the proper key:
-- `<bs>` - Backspace
+- `<bs>` - Backspace
-- `<del>` - Delete
+- `<del>` - Delete
-- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
-- `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
-- `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
-- `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
-- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
-- `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
-- `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
-- `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
-- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
-- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
- any additional keys. This is useful if you have to generally wait for the UI
- to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
+ sending any additional keys. This is useful if you have to generally wait
+ for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The

diff --git a/website/source/docs/builders/parallels.html.markdown b/website/source/docs/builders/parallels.html.markdown
index 7d355eaef..582f8e0af 100644
--- a/website/source/docs/builders/parallels.html.markdown
+++ b/website/source/docs/builders/parallels.html.markdown
@@ -16,16 +16,16 @@ Packer actually comes with multiple builders able to create Parallels machines,
depending on the strategy you want to use to build the image. Packer supports
the following Parallels builders:
-- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO file,
- creates a brand new Parallels VM, installs an OS, provisions software within
- the OS, then exports that machine to create an image. This is best for people
- who want to start from scratch.
+- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO
+ file, creates a brand new Parallels VM, installs an OS, provisions software
+ within the OS, then exports that machine to create an image. This is best
+ for people who want to start from scratch.
-- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an
- existing PVM file, runs provisioners on top of that VM, and exports that
- machine to create an image. This is best if you have an existing Parallels VM
- export you want to use as the source. As an additional benefit, you can feed
- the artifact of this builder back into itself to iterate on a machine.
+- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an
+ existing PVM file, runs provisioners on top of that VM, and exports that
+ machine to create an image. This is best if you have an existing Parallels
+ VM export you want to use as the source. As an additional benefit, you can
+ feed the artifact of this builder back into itself to iterate on a machine.

## Requirements

diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown
index 57c53e4c0..651c69122 100644
--- a/website/source/docs/builders/qemu.html.markdown
+++ b/website/source/docs/builders/qemu.html.markdown
@@ -81,124 +81,124 @@ builder.

### Required:

-- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files
- are so large, this is required and Packer will verify it prior to booting a
- virtual machine with the ISO attached. The type of the checksum is specified
- with `iso_checksum_type`, documented below.
+- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO
+ files are so large, this is required and Packer will verify it prior to
+ booting a virtual machine with the ISO attached. The type of the checksum is
+ specified with `iso_checksum_type`, documented below.
-- `iso_checksum_type` (string) - The type of the checksum specified in
- `iso_checksum`. Valid values are "md5", "sha1", "sha256", or
- "sha512" currently.
+- `iso_checksum_type` (string) - The type of the checksum specified in
+ `iso_checksum`. Valid values are "md5", "sha1", "sha256", or
+ "sha512" currently.
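For instance, the two checksum settings above travel together; a sketch with
a placeholder digest (not a value from this patch):

``` {.javascript}
{
  "iso_checksum_type": "sha256",
  "iso_checksum": "<output of `sha256sum your.iso`>"
}
```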
-- `iso_url` (string) - A URL to the ISO containing the installation image. This
- URL can be either an HTTP URL or a file URL (or path to a file). If this is an
- HTTP URL, Packer will download it and cache it between runs.
+- `iso_url` (string) - A URL to the ISO containing the installation image.
+ This URL can be either an HTTP URL or a file URL (or path to a file). If
+ this is an HTTP URL, Packer will download it and cache it between runs.
-- `ssh_username` (string) - The username to use to SSH into the machine once the
- OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once
+ the OS is installed.

### Optional:

-- `accelerator` (string) - The accelerator type to use when running the VM. This
- may have a value of either "none", "kvm", "tcg", or "xen" and you must have
- that support in on the machine on which you run the builder. By default "kvm"
- is used.
+- `accelerator` (string) - The accelerator type to use when running the VM.
+ This may have a value of either "none", "kvm", "tcg", or "xen" and you must
+ have that support enabled on the machine on which you run the builder. By
+ default "kvm" is used.
-- `boot_command` (array of strings) - This is an array of commands to type when
- the virtual machine is first booted. The goal of these commands should be to
- type just enough to initialize the operating system installer. Special keys
- can be typed as well, and are covered in the section below on the
- boot command. If this is not specified, it is assumed the installer will
- start itself.
+- `boot_command` (array of strings) - This is an array of commands to type
+ when the virtual machine is first booted. The goal of these commands should
+ be to type just enough to initialize the operating system installer. Special
+ keys can be typed as well, and are covered in the section below on the
+ boot command. If this is not specified, it is assumed the installer will
+ start itself.
-- `boot_wait` (string) - The time to wait after booting the initial virtual
- machine before typing the `boot_command`. The value of this should be
- a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
- seconds and one minute 30 seconds, respectively. If this isn't specified, the
- default is 10 seconds.
+- `boot_wait` (string) - The time to wait after booting the initial virtual
+ machine before typing the `boot_command`. The value of this should be
+ a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
+ five seconds and one minute 30 seconds, respectively. If this isn't
+ specified, the default is 10 seconds.
-- `disk_cache` (string) - The cache mode to use for disk. Allowed values include
- any of "writethrough", "writeback", "none", "unsafe" or "directsync". By
- default, this is set to "writeback".
+- `disk_cache` (string) - The cache mode to use for disk. Allowed values
+ include any of "writethrough", "writeback", "none", "unsafe"
+ or "directsync". By default, this is set to "writeback".
-- `disk_discard` (string) - The discard mode to use for disk. Allowed values
- include any of "unmap" or "ignore". By default, this is set to "ignore".
+- `disk_discard` (string) - The discard mode to use for disk. Allowed values
+ include any of "unmap" or "ignore". By default, this is set to "ignore".
-- `disk_image` (boolean) - Packer defaults to building from an ISO file, this
- parameter controls whether the ISO URL supplied is actually a bootable
- QEMU image. When this value is set to true, the machine will clone the source,
- resize it according to `disk_size` and boot the image.
+- `disk_image` (boolean) - Packer defaults to building from an ISO file, this
+ parameter controls whether the ISO URL supplied is actually a bootable
+ QEMU image. When this value is set to true, the machine will clone the
+ source, resize it according to `disk_size` and boot the image.
-- `disk_interface` (string) - The interface to use for the disk. Allowed values
- include any of "ide," "scsi" or "virtio." Note also that any boot commands or
- kickstart type scripts must have proper adjustments for resulting
- device names. The Qemu builder uses "virtio" by default.
+- `disk_interface` (string) - The interface to use for the disk. Allowed
+ values include any of "ide," "scsi" or "virtio." Note also that any boot
+ commands or kickstart type scripts must have proper adjustments for
+ resulting device names. The Qemu builder uses "virtio" by default.
-- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for
- the VM. By default, this is 40000 (about 40 GB).
+- `disk_size` (integer) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40000 (about 40 GB).
-- `floppy_files` (array of strings) - A list of files to place onto a floppy
- disk that is attached when the VM is booted. This is most useful for
- unattended Windows installs, which look for an `Autounattend.xml` file on
- removable media. By default, no floppy will be attached. All files listed in
- this setting get placed into the root directory of the floppy and the floppy
- is attached as the first floppy device. Currently, no support exists for
- creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\])
- are allowed. Directory names are also allowed, which will add all the files
- found in the directory to the floppy.
+- `floppy_files` (array of strings) - A list of files to place onto a floppy
+ disk that is attached when the VM is booted. This is most useful for
+ unattended Windows installs, which look for an `Autounattend.xml` file on
+ removable media. By default, no floppy will be attached. All files listed in
+ this setting get placed into the root directory of the floppy and the floppy
+ is attached as the first floppy device. Currently, no support exists for
+ creating sub-directories on the floppy. Wildcard characters (\*, ?,
+ and \[\]) are allowed. Directory names are also allowed, which will add all
+ the files found in the directory to the floppy.
-- `format` (string) - Either "qcow2" or "raw", this specifies the output format
- of the virtual machine image. This defaults to "qcow2".
+- `format` (string) - Either "qcow2" or "raw", this specifies the output
+ format of the virtual machine image. This defaults to "qcow2".
-- `headless` (boolean) - Packer defaults to building QEMU virtual machines by
- launching a GUI that shows the console of the machine being built. When this
- value is set to true, the machine will start without a console.
+- `headless` (boolean) - Packer defaults to building QEMU virtual machines by
+ launching a GUI that shows the console of the machine being built. When this
+ value is set to true, the machine will start without a console.
-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
- The files in this directory will be available over HTTP that will be
- requestable from the virtual machine. This is useful for hosting kickstart
- files and so on. By default this is "", which means no HTTP server will
- be started. The address and port of the HTTP server will be available as
- variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+ HTTP server. The files in this directory will be available over HTTP and
+ can be requested from the virtual machine. This is useful for hosting
+ kickstart files and so on. By default this is "", which means no HTTP server
+ will be started. The address and port of the HTTP server will be available
+ as variables in `boot_command`. This is covered in more detail below.
-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
- maximum port to use for the HTTP server started to serve the `http_directory`.
- Because Packer often runs in parallel, Packer will choose a randomly available
- port in this range to run the HTTP server. If you want to force the HTTP
- server to be on one port, make this minimum and maximum port the same. By
- default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+ maximum port to use for the HTTP server started to serve the
+ `http_directory`. Because Packer often runs in parallel, Packer will choose
+ a randomly available port in this range to run the HTTP server. If you want
+ to force the HTTP server to be on one port, make this minimum and maximum
+ port the same. By default the values are 8000 and 9000, respectively.
-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer
- will try these in order. If anything goes wrong attempting to download or
- while downloading a single URL, it will move on to the next. All URLs must
- point to the same file (same checksum). By default this is empty and `iso_url`
- is used. Only one of `iso_url` or `iso_urls` can be specified.
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
+ Packer will try these in order. If anything goes wrong attempting to
+ download or while downloading a single URL, it will move on to the next. All
+ URLs must point to the same file (same checksum). By default this is empty
+ and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
-- `machine_type` (string) - The type of machine emulation to use. Run your qemu
- binary with the flags `-machine help` to list available types for your system.
- This defaults to "pc".
+- `machine_type` (string) - The type of machine emulation to use. Run your
+ qemu binary with the flags `-machine help` to list available types for
+ your system. This defaults to "pc".
-- `net_device` (string) - The driver to use for the network interface. Allowed
- values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000,"
- "pcnet" or "virtio." The Qemu builder uses "virtio" by default.
+- `net_device` (string) - The driver to use for the network interface. Allowed
+ values "ne2k\_pci," "i82551," "i82557b," "i82559er," "rtl8139," "e1000,"
+ "pcnet" or "virtio." The Qemu builder uses "virtio" by default.
-- `output_directory` (string) - This is the path to the directory where the
- resulting virtual machine will be created. This may be relative or absolute.
- If relative, the path is relative to the working directory when `packer`
- is executed. This directory must not exist or be empty prior to running
- the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
- name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when `packer`
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.
-- `qemu_binary` (string) - The name of the Qemu binary to look for. This
- defaults to "qemu-system-x86\_64", but may need to be changed for
- some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better
- choice for some systems.
+- `qemu_binary` (string) - The name of the Qemu binary to look for. This
+ defaults to "qemu-system-x86\_64", but may need to be changed for
+ some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a
+ better choice for some systems.
-- `qemuargs` (array of array of strings) - Allows complete control over the qemu
- command line (though not, at this time, qemu-img). Each array of strings makes
- up a command line switch that overrides matching default switch/value pairs.
- Any value specified as an empty string is ignored. All values after the switch
- are concatenated with no separator.
+- `qemuargs` (array of array of strings) - Allows complete control over the
+ qemu command line (though not, at this time, qemu-img). Each array of
+ strings makes up a command line switch that overrides matching default
+ switch/value pairs. Any value specified as an empty string is ignored. All
+ values after the switch are concatenated with no separator.

\~> **Warning:** The qemu command line allows extreme flexibility, so beware
of conflicting arguments causing failures of your run. For instance, using
@@ -207,7 +207,7 @@ shutdown -P now) to the virtual machine, thus preventing proper shutdown. To
see the defaults, look in the packer.log file and search for the
qemu-system-x86 command. The arguments are all printed for review.
- The following shows a sample usage:
+The following shows a sample usage:
``` {.javascript}
// ...
@@ -225,34 +225,35 @@ command. The arguments are all printed for review.
// ...
```
- would produce the following (not including other defaults supplied by the builder and not otherwise conflicting with the qemuargs):
+would produce the following (not including other defaults supplied by the
+builder and not otherwise conflicting with the qemuargs):
    qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0
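As an aside, because each `qemuargs` entry overrides the matching default
switch, a template that only wants more memory could pass just that one pair
(a sketch; the value is arbitrary):

``` {.javascript}
{
  "qemuargs": [
    [ "-m", "2048m" ]
  ]
}
```

The generated command line would then use `-m 2048m` while the default
`-netdev` and `-device` switches remain untouched.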
     
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty string,
- which tells Packer to just forcefully shut down the machine.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine.
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ `shutdown_command` for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.
-- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
- maximum port to use for the SSH port on the host machine which is forwarded to
- the SSH port on the guest machine. Because Packer often runs in parallel,
- Packer will choose a randomly available port in this range to use as the
- host port.
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
+ maximum port to use for the SSH port on the host machine which is forwarded
+ to the SSH port on the guest machine. Because Packer often runs in parallel,
+ Packer will choose a randomly available port in this range to use as the
+ host port.
-- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the
- new virtual machine, without the file extension. By default this is
- "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for
+ the new virtual machine, without the file extension. By default this is
+ "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
-- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
- use for the VNC port on the host machine which is forwarded to the VNC port on
- the guest machine. Because Packer often runs in parallel, Packer will choose a
- randomly available port in this range to use as the host port.
+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
+ to use for the VNC port on the host machine which is forwarded to the VNC
+ port on the guest machine. Because Packer often runs in parallel, Packer
+ will choose a randomly available port in this range to use as the host port.

## Boot Command

@@ -270,40 +271,40 @@ machine, simulating a human actually typing the keyboard.
There are a set of special keys available. If these are in your boot command,
they will be replaced by the proper key:
-- `<bs>` - Backspace
+- `<bs>` - Backspace
-- `<del>` - Delete
+- `<del>` - Delete
-- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
-- `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.
-- `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.
-- `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.
-- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
-- `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.
-- `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.
-- `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.
-- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
-- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
- any additional keys. This is useful if you have to generally wait for the UI
- to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
+ sending any additional keys. This is useful if you have to generally wait
+ for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:
-- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
- is started serving the directory specified by the `http_directory`
- configuration parameter. If `http_directory` isn't specified, these will be
- blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server
+ that is started serving the directory specified by the `http_directory`
+ configuration parameter. If `http_directory` isn't specified, these will be
+ blank!

Example boot command. This is actually a working boot command used to start a
CentOS 6.4 installer:

diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown
index bdccdf768..7df4975dc 100644
--- a/website/source/docs/builders/virtualbox-iso.html.markdown
+++ b/website/source/docs/builders/virtualbox-iso.html.markdown
@@ -54,177 +54,179 @@ builder.

### Required:

-- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files
- are so large, this is required and Packer will verify it prior to booting a
- virtual machine with the ISO attached. The type of the checksum is specified
- with `iso_checksum_type`, documented below.
+- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO
+ files are so large, this is required and Packer will verify it prior to
+ booting a virtual machine with the ISO attached. The type of the checksum is
+ specified with `iso_checksum_type`, documented below.
-- `iso_checksum_type` (string) - The type of the checksum specified in
- `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or
- "sha512" currently. While "none" will skip checksumming, this is not
- recommended since ISO files are generally large and corruption does happen
- from time to time.
+- `iso_checksum_type` (string) - The type of the checksum specified in
+ `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or
+ "sha512" currently. While "none" will skip checksumming, this is not
+ recommended since ISO files are generally large and corruption does happen
+ from time to time.
-- `iso_url` (string) - A URL to the ISO containing the installation image. This
- URL can be either an HTTP URL or a file URL (or path to a file). If this is an
- HTTP URL, Packer will download it and cache it between runs.
+- `iso_url` (string) - A URL to the ISO containing the installation image.
+ This URL can be either an HTTP URL or a file URL (or path to a file). If
+ this is an HTTP URL, Packer will download it and cache it between runs.
-- `ssh_username` (string) - The username to use to SSH into the machine once the
- OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once
+ the OS is installed.
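Taken together, the required settings above amount to a minimal builder
stanza along these lines (a sketch; the URL, checksum, and username are
placeholders, not values from this patch):

``` {.javascript}
{
  "builders": [
    {
      "type": "virtualbox-iso",
      "iso_url": "http://example.com/ubuntu-12.04.iso",
      "iso_checksum": "<md5 of the ISO>",
      "iso_checksum_type": "md5",
      "ssh_username": "packer"
    }
  ]
}
```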
### Optional:

-- `boot_command` (array of strings) - This is an array of commands to type when
- the virtual machine is first booted. The goal of these commands should be to
- type just enough to initialize the operating system installer. Special keys
- can be typed as well, and are covered in the section below on the
- boot command. If this is not specified, it is assumed the installer will
- start itself.
+- `boot_command` (array of strings) - This is an array of commands to type
+ when the virtual machine is first booted. The goal of these commands should
+ be to type just enough to initialize the operating system installer. Special
+ keys can be typed as well, and are covered in the section below on the
+ boot command. If this is not specified, it is assumed the installer will
+ start itself.
-- `boot_wait` (string) - The time to wait after booting the initial virtual
- machine before typing the `boot_command`. The value of this should be
- a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
- seconds and one minute 30 seconds, respectively. If this isn't specified, the
- default is 10 seconds.
+- `boot_wait` (string) - The time to wait after booting the initial virtual
+ machine before typing the `boot_command`. The value of this should be
+ a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
+ five seconds and one minute 30 seconds, respectively. If this isn't
+ specified, the default is 10 seconds.
-- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for
- the VM. By default, this is 40000 (about 40 GB).
+- `disk_size` (integer) - The size, in megabytes, of the hard disk to create
+ for the VM. By default, this is 40000 (about 40 GB).
-- `export_opts` (array of strings) - Additional options to pass to the
- `VBoxManage export`. This can be useful for passing product information to
- include in the resulting appliance file.
+- `export_opts` (array of strings) - Additional options to pass to the
+ `VBoxManage export`. This can be useful for passing product information to
+ include in the resulting appliance file.
-- `floppy_files` (array of strings) - A list of files to place onto a floppy
- disk that is attached when the VM is booted. This is most useful for
- unattended Windows installs, which look for an `Autounattend.xml` file on
- removable media. By default, no floppy will be attached. All files listed in
- this setting get placed into the root directory of the floppy and the floppy
- is attached as the first floppy device. Currently, no support exists for
- creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\])
- are allowed. Directory names are also allowed, which will add all the files
- found in the directory to the floppy.
+- `floppy_files` (array of strings) - A list of files to place onto a floppy
+ disk that is attached when the VM is booted. This is most useful for
+ unattended Windows installs, which look for an `Autounattend.xml` file on
+ removable media. By default, no floppy will be attached. All files listed in
+ this setting get placed into the root directory of the floppy and the floppy
+ is attached as the first floppy device. Currently, no support exists for
+ creating sub-directories on the floppy. Wildcard characters (\*, ?,
+ and \[\]) are allowed. Directory names are also allowed, which will add all
+ the files found in the directory to the floppy.
-- `format` (string) - Either "ovf" or "ova", this specifies the output format of
- the exported virtual machine. This defaults to "ovf".
+- `format` (string) - Either "ovf" or "ova", this specifies the output format
+ of the exported virtual machine. This defaults to "ovf".
-- `guest_additions_mode` (string) - The method by which guest additions are made
- available to the guest for installation. Valid options are "upload", "attach",
- or "disable". If the mode is "attach" the guest additions ISO will be attached
- as a CD device to the virtual machine. If the mode is "upload" the guest
- additions ISO will be uploaded to the path specified by
- `guest_additions_path`. The default value is "upload". If "disable" is used,
- guest additions won't be downloaded, either.
+- `guest_additions_mode` (string) - The method by which guest additions are
+ made available to the guest for installation. Valid options are "upload",
+ "attach", or "disable". If the mode is "attach" the guest additions ISO will
+ be attached as a CD device to the virtual machine. If the mode is "upload"
+ the guest additions ISO will be uploaded to the path specified by
+ `guest_additions_path`. The default value is "upload". If "disable" is used,
+ guest additions won't be downloaded, either.
-- `guest_additions_path` (string) - The path on the guest virtual machine where
- the VirtualBox guest additions ISO will be uploaded. By default this is
- "VBoxGuestAdditions.iso" which should upload into the login directory of
- the user. This is a [configuration
- template](/docs/templates/configuration-templates.html) where the `Version`
- variable is replaced with the VirtualBox version.
+- `guest_additions_path` (string) - The path on the guest virtual machine
+ where the VirtualBox guest additions ISO will be uploaded. By default this
+ is "VBoxGuestAdditions.iso" which should upload into the login directory of
+ the user. This is a [configuration
+ template](/docs/templates/configuration-templates.html) where the `Version`
+ variable is replaced with the VirtualBox version.
-- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions
- ISO that will be uploaded to the guest VM. By default the checksums will be
- downloaded from the VirtualBox website, so this only needs to be set if you
- want to be explicit about the checksum.
+- `guest_additions_sha256` (string) - The SHA256 checksum of the guest
+ additions ISO that will be uploaded to the guest VM. By default the
+ checksums will be downloaded from the VirtualBox website, so this only needs
+ to be set if you want to be explicit about the checksum.
-- `guest_additions_url` (string) - The URL to the guest additions ISO to upload.
- This can also be a file URL if the ISO is at a local path. By default, the
- VirtualBox builder will attempt to find the guest additions ISO on the local
- file system. If it is not available locally, the builder will download the
- proper guest additions ISO from the internet.
+- `guest_additions_url` (string) - The URL to the guest additions ISO
+ to upload. This can also be a file URL if the ISO is at a local path. By
+ default, the VirtualBox builder will attempt to find the guest additions ISO
+ on the local file system. If it is not available locally, the builder will
+ download the proper guest additions ISO from the internet.
-- `guest_os_type` (string) - The guest OS type being installed. By default this
- is "other", but you can get *dramatic* performance improvements by setting
- this to the proper value. To view all available values for this run
- `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how
- to optimize the virtual hardware to work best with that operating system.
+- `guest_os_type` (string) - The guest OS type being installed. By default
+ this is "other", but you can get *dramatic* performance improvements by
+ setting this to the proper value. To view all available values for this run
+ `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how
+ to optimize the virtual hardware to work best with that operating system.
-- `hard_drive_interface` (string) - The type of controller that the primary hard
- drive is attached to, defaults to "ide". When set to "sata", the drive is
- attached to an AHCI SATA controller. When set to "scsi", the drive is attached
- to an LsiLogic SCSI controller.
+- `hard_drive_interface` (string) - The type of controller that the primary
+ hard drive is attached to, defaults to "ide". When set to "sata", the drive
+ is attached to an AHCI SATA controller. When set to "scsi", the drive is
+ attached to an LsiLogic SCSI controller.
-- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines
- by launching a GUI that shows the console of the machine being built. When
- this value is set to true, the machine will start without a console.
+- `headless` (boolean) - Packer defaults to building VirtualBox virtual
+ machines by launching a GUI that shows the console of the machine
+ being built. When this value is set to true, the machine will start without
+ a console.
-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
- The files in this directory will be available over HTTP that will be
- requestable from the virtual machine. This is useful for hosting kickstart
- files and so on. By default this is "", which means no HTTP server will
- be started. The address and port of the HTTP server will be available as
- variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+ HTTP server. The files in this directory will be available over HTTP and
+ can be requested from the virtual machine. This is useful for hosting
+ kickstart files and so on. By default this is "", which means no HTTP server
+ will be started. The address and port of the HTTP server will be available
+ as variables in `boot_command`. This is covered in more detail below.
-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
- maximum port to use for the HTTP server started to serve the `http_directory`.
- Because Packer often runs in parallel, Packer will choose a randomly available
- port in this range to run the HTTP server. If you want to force the HTTP
- server to be on one port, make this minimum and maximum port the same. By
- default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+ maximum port to use for the HTTP server started to serve the
+ `http_directory`. Because Packer often runs in parallel, Packer will choose
+ a randomly available port in this range to run the HTTP server. If you want
+ to force the HTTP server to be on one port, make this minimum and maximum
+ port the same. By default the values are 8000 and 9000, respectively.
-- `iso_interface` (string) - The type of controller that the ISO is attached to,
- defaults to "ide". When set to "sata", the drive is attached to an AHCI
- SATA controller.
+- `iso_interface` (string) - The type of controller that the ISO is attached
+ to, defaults to "ide". When set to "sata", the drive is attached to an AHCI
+ SATA controller.
-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer
- will try these in order. If anything goes wrong attempting to download or
- while downloading a single URL, it will move on to the next. All URLs must
- point to the same file (same checksum). By default this is empty and `iso_url`
- is used. Only one of `iso_url` or `iso_urls` can be specified.
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
+ Packer will try these in order. If anything goes wrong attempting to
+ download or while downloading a single URL, it will move on to the next. All
+ URLs must point to the same file (same checksum). By default this is empty
+ and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
-- `output_directory` (string) - This is the path to the directory where the
- resulting virtual machine will be created. This may be relative or absolute.
- If relative, the path is relative to the working directory when `packer`
- is executed. This directory must not exist or be empty prior to running
- the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
- name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when `packer`
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.
-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty string,
- which tells Packer to just forcefully shut down the machine unless a shutdown
- command takes place inside script so this may safely be omitted. If one or
- more scripts require a reboot it is suggested to leave this blank since
- reboots may fail and specify the final shutdown command in your last script.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine; if a
+ shutdown command runs inside one of your scripts, this may safely be
+ omitted. If one or more scripts require a reboot, it is suggested to leave
+ this blank (since reboots may fail) and specify the final shutdown command
+ in your last script.
-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ `shutdown_command` for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.
-- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
- maximum port to use for the SSH port on the host machine which is forwarded to
- the SSH port on the guest machine. Because Packer often runs in parallel,
- Packer will choose a randomly available port in this range to use as the
- host port.
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
+  maximum port to use for the SSH port on the host machine which is forwarded
+  to the SSH port on the guest machine. Because Packer often runs in parallel,
+  Packer will choose a randomly available port in this range to use as the
+  host port.

-- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer
-  does not setup forwarded port mapping for SSH requests and uses `ssh_port` on
-  the host to communicate to the virtual machine
+- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer
+  does not set up a forwarded port mapping for SSH requests and instead uses
+  `ssh_port` on the host to communicate with the virtual machine. See the
+  example after this list.

-- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
-  execute in order to further customize the virtual machine being created. The
-  value of this is an array of commands to execute. The commands are executed in
-  the order defined in the template. For each command, the command is defined
-  itself as an array of strings, where each string represents a single argument
-  on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each
-  arg is treated as a [configuration
-  template](/docs/templates/configuration-templates.html), where the `Name`
-  variable is replaced with the VM name. More details on how to use `VBoxManage`
-  are below.
+- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
+  execute in order to further customize the virtual machine being created. The
+  value of this is an array of commands to execute. The commands are executed
+  in the order defined in the template. For each command, the command is
+  defined itself as an array of strings, where each string represents a single
+  argument on the command-line to `VBoxManage` (but excluding
+  `VBoxManage` itself). Each arg is treated as a [configuration
+  template](/docs/templates/configuration-templates.html), where the `Name`
+  variable is replaced with the VM name. More details on how to use
+  `VBoxManage` are below.

-- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
-  except that it is run after the virtual machine is shutdown, and before the
-  virtual machine is exported.
+- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
+  except that it is run after the virtual machine is shut down, and before the
+  virtual machine is exported.

-- `virtualbox_version_file` (string) - The path within the virtual machine to
-  upload a file that contains the VirtualBox version that was used to create
-  the machine. This information can be useful for provisioning. By default this
-  is ".vbox\_version", which will generally be upload it into the
-  home directory.
+- `virtualbox_version_file` (string) - The path within the virtual machine to
+  upload a file that contains the VirtualBox version that was used to create
+  the machine. This information can be useful for provisioning. By default
+  this is ".vbox\_version", which will generally upload it into the
+  home directory.

-- `vm_name` (string) - This is the name of the OVF file for the new virtual
-  machine, without the file extension. By default this is "packer-BUILDNAME",
-  where "BUILDNAME" is the name of the build.
+- `vm_name` (string) - This is the name of the OVF file for the new virtual
+  machine, without the file extension. By default this is "packer-BUILDNAME",
+  where "BUILDNAME" is the name of the build.
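As a minimal sketch of how these options fit together, the template below skips
the SSH NAT mapping and runs a couple of `VBoxManage` customizations. The ISO
URL, checksum, and credentials are placeholders, not values from this patch:

```json
{
  "builders": [
    {
      "type": "virtualbox-iso",
      "guest_os_type": "Ubuntu_64",
      "iso_url": "http://example.com/ubuntu-12.04.iso",
      "iso_checksum": "af5f788aee1b32c4b2634734309cc9e9",
      "iso_checksum_type": "md5",
      "ssh_username": "packer",
      "ssh_port": 22,
      "ssh_skip_nat_mapping": true,
      "vboxmanage": [
        ["modifyvm", "{{.Name}}", "--memory", "1024"],
        ["modifyvm", "{{.Name}}", "--cpus", "2"]
      ],
      "shutdown_command": "echo 'packer' | sudo -S shutdown -P now"
    }
  ]
}
```

With `ssh_skip_nat_mapping` set, Packer does not create a forwarded port
mapping and connects straight to `ssh_port`, so this sketch assumes the guest
is reachable from the host (for example, via a bridged or host-only adapter
configured elsewhere).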
## Boot Command

@@ -242,40 +244,40 @@ machine, simulating a human actually typing the keyboard.

There are a set of special keys available. If these are in your boot command,
they will be replaced by the proper key:

-- `<bs>` - Backspace
+- `<bs>` - Backspace

-- `<del>` - Delete
+- `<del>` - Delete

-- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-- `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-- `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-- `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-- `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-- `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-- `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
-  any additional keys. This is useful if you have to generally wait for the UI
-  to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
+  sending any additional keys. This is useful if you have to generally wait
+  for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:

-- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
-  is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will be
-  blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
+  that is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!

Example boot command. This is actually a working boot command used to start an
Ubuntu 12.04 installer:

diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown
index dcf5dbd5c..b9b2de033 100644
--- a/website/source/docs/builders/virtualbox-ovf.html.markdown
+++ b/website/source/docs/builders/virtualbox-ovf.html.markdown
@@ -19,11 +19,13 @@ image).
When exporting from VirtualBox make sure to choose OVF Version 2, since Version
1 is not compatible and will generate errors like this:

-    ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR
-    ==> virtualbox-ovf: VBoxManage: error: Appliance read failed
-    ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21
-    ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance
-    ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp
+    ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR
+    ==> virtualbox-ovf: VBoxManage: error: Appliance read failed
+    ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21
+    ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance
+    ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp

The builder builds a virtual machine by importing an existing OVF or OVA file.
It then boots this image, runs provisioners on this new VM, and exports that VM
@@ -61,149 +63,151 @@ builder.

### Required:

-- `source_path` (string) - The path to an OVF or OVA file that acts as the
-  source of this build.
+- `source_path` (string) - The path to an OVF or OVA file that acts as the
+  source of this build.

-- `ssh_username` (string) - The username to use to SSH into the machine once the
-  OS is installed.
+- `ssh_username` (string) - The username to use to SSH into the machine once
+  the OS is installed.

### Optional:

-- `boot_command` (array of strings) - This is an array of commands to type when
-  the virtual machine is first booted. The goal of these commands should be to
-  type just enough to initialize the operating system installer. Special keys
-  can be typed as well, and are covered in the section below on the
-  boot command. If this is not specified, it is assumed the installer will
-  start itself.
+- `boot_command` (array of strings) - This is an array of commands to type
+  when the virtual machine is first booted. The goal of these commands should
+  be to type just enough to initialize the operating system installer. Special
+  keys can be typed as well, and are covered in the section below on the
+  boot command. If this is not specified, it is assumed the installer will
+  start itself.

-- `boot_wait` (string) - The time to wait after booting the initial virtual
-  machine before typing the `boot_command`. The value of this should be
-  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
-  seconds and one minute 30 seconds, respectively. If this isn't specified, the
-  default is 10 seconds.
+- `boot_wait` (string) - The time to wait after booting the initial virtual
+  machine before typing the `boot_command`. The value of this should be
+  a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
+  five seconds and one minute 30 seconds, respectively. If this isn't
+  specified, the default is 10 seconds.

-- `export_opts` (array of strings) - Additional options to pass to the
-  `VBoxManage export`. This can be useful for passing product information to
-  include in the resulting appliance file.
+- `export_opts` (array of strings) - Additional options to pass to the
+  `VBoxManage export`.
This can be useful for passing product information to + include in the resulting appliance file. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `format` (string) - Either "ovf" or "ova", this specifies the output format of - the exported virtual machine. This defaults to "ovf". +- `format` (string) - Either "ovf" or "ova", this specifies the output format + of the exported virtual machine. This defaults to "ovf". -- `guest_additions_mode` (string) - The method by which guest additions are made - available to the guest for installation. Valid options are "upload", "attach", - or "disable". If the mode is "attach" the guest additions ISO will be attached - as a CD device to the virtual machine. If the mode is "upload" the guest - additions ISO will be uploaded to the path specified by - `guest_additions_path`. The default value is "upload". If "disable" is used, - guest additions won't be downloaded, either. +- `guest_additions_mode` (string) - The method by which guest additions are + made available to the guest for installation. Valid options are "upload", + "attach", or "disable". If the mode is "attach" the guest additions ISO will + be attached as a CD device to the virtual machine. If the mode is "upload" + the guest additions ISO will be uploaded to the path specified by + `guest_additions_path`. The default value is "upload". If "disable" is used, + guest additions won't be downloaded, either. -- `guest_additions_path` (string) - The path on the guest virtual machine where - the VirtualBox guest additions ISO will be uploaded. By default this is - "VBoxGuestAdditions.iso" which should upload into the login directory of - the user. This is a [configuration - template](/docs/templates/configuration-templates.html) where the `Version` - variable is replaced with the VirtualBox version. +- `guest_additions_path` (string) - The path on the guest virtual machine + where the VirtualBox guest additions ISO will be uploaded. By default this + is "VBoxGuestAdditions.iso" which should upload into the login directory of + the user. This is a [configuration + template](/docs/templates/configuration-templates.html) where the `Version` + variable is replaced with the VirtualBox version. 
-- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions
-  ISO that will be uploaded to the guest VM. By default the checksums will be
-  downloaded from the VirtualBox website, so this only needs to be set if you
-  want to be explicit about the checksum.
+- `guest_additions_sha256` (string) - The SHA256 checksum of the guest
+  additions ISO that will be uploaded to the guest VM. By default the
+  checksums will be downloaded from the VirtualBox website, so this only needs
+  to be set if you want to be explicit about the checksum.

-- `guest_additions_url` (string) - The URL to the guest additions ISO to upload.
-  This can also be a file URL if the ISO is at a local path. By default the
-  VirtualBox builder will go and download the proper guest additions ISO from
-  the internet.
+- `guest_additions_url` (string) - The URL to the guest additions ISO
+  to upload. This can also be a file URL if the ISO is at a local path. By
+  default the VirtualBox builder will go and download the proper guest
+  additions ISO from the internet.

-- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines
-  by launching a GUI that shows the console of the machine being built. When
-  this value is set to true, the machine will start without a console.
+- `headless` (boolean) - Packer defaults to building VirtualBox virtual
+  machines by launching a GUI that shows the console of the machine
+  being built. When this value is set to true, the machine will start without
+  a console.

-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
-  The files in this directory will be available over HTTP that will be
-  requestable from the virtual machine. This is useful for hosting kickstart
-  files and so on. By default this is "", which means no HTTP server will
-  be started. The address and port of the HTTP server will be available as
-  variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+  HTTP server. The files in this directory will be available over HTTP and
+  can be requested from the virtual machine. This is useful for hosting
+  kickstart files and so on. By default this is "", which means no HTTP server
+  will be started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.

-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
-  maximum port to use for the HTTP server started to serve the `http_directory`.
-  Because Packer often runs in parallel, Packer will choose a randomly available
-  port in this range to run the HTTP server. If you want to force the HTTP
-  server to be on one port, make this minimum and maximum port the same. By
-  default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+  maximum port to use for the HTTP server started to serve the
+  `http_directory`. Because Packer often runs in parallel, Packer will choose
+  a randomly available port in this range to run the HTTP server. If you want
+  to force the HTTP server to be on one port, make this minimum and maximum
+  port the same. By default the values are 8000 and 9000, respectively.

-- `import_flags` (array of strings) - Additional flags to pass to
-  `VBoxManage import`. This can be used to add additional command-line flags
-  such as `--eula-accept` to accept a EULA in the OVF.
+- `import_flags` (array of strings) - Additional flags to pass to
+  `VBoxManage import`. This can be used to add additional command-line flags
+  such as `--eula-accept` to accept a EULA in the OVF.

-- `import_opts` (string) - Additional options to pass to the
-  `VBoxManage import`. This can be useful for passing "keepallmacs" or
-  "keepnatmacs" options for existing ovf images.
+- `import_opts` (string) - Additional options to pass to the
+  `VBoxManage import`. This can be useful for passing "keepallmacs" or
+  "keepnatmacs" options for existing ovf images.

-- `output_directory` (string) - This is the path to the directory where the
-  resulting virtual machine will be created. This may be relative or absolute.
-  If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running
-  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
-  name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+  resulting virtual machine will be created. This may be relative or absolute.
+  If relative, the path is relative to the working directory when `packer`
+  is executed. This directory must not exist or be empty prior to running
+  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+  name of the build.

-- `shutdown_command` (string) - The command to use to gracefully shut down the
-  machine once all the provisioning is done. By default this is an empty string,
-  which tells Packer to just forcefully shut down the machine unless a shutdown
-  command takes place inside script so this may safely be omitted. If one or
-  more scripts require a reboot it is suggested to leave this blank since
-  reboots may fail and specify the final shutdown command in your last script.
+- `shutdown_command` (string) - The command to use to gracefully shut down the
+  machine once all the provisioning is done. By default this is an empty
+  string, which tells Packer to just forcefully shut down the machine; if a
+  shutdown command already runs inside one of your scripts, this may safely be
+  omitted. If one or more scripts require a reboot, it is suggested to leave
+  this blank, since reboots may fail, and to specify the final shutdown
+  command in your last script.

-- `shutdown_timeout` (string) - The amount of time to wait after executing the
-  `shutdown_command` for the virtual machine to actually shut down. If it
-  doesn't shut down in this time, it is an error. By default, the timeout is
-  "5m", or five minutes.
+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+  `shutdown_command` for the virtual machine to actually shut down. If it
+  doesn't shut down in this time, it is an error. By default, the timeout is
+  "5m", or five minutes.

-- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
-  maximum port to use for the SSH port on the host machine which is forwarded to
-  the SSH port on the guest machine. Because Packer often runs in parallel,
-  Packer will choose a randomly available port in this range to use as the
-  host port.
+- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and
+  maximum port to use for the SSH port on the host machine which is forwarded
+  to the SSH port on the guest machine. Because Packer often runs in parallel,
+  Packer will choose a randomly available port in this range to use as the
+  host port.

-- `ssh_skip_nat_mapping` (boolean) - Defaults to false.
When enabled, Packer
-  does not setup forwarded port mapping for SSH requests and uses `ssh_port` on
-  the host to communicate to the virtual machine
+- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer
+  does not set up a forwarded port mapping for SSH requests and instead uses
+  `ssh_port` on the host to communicate with the virtual machine. See the
+  example after this list.

-- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
-  execute in order to further customize the virtual machine being created. The
-  value of this is an array of commands to execute. The commands are executed in
-  the order defined in the template. For each command, the command is defined
-  itself as an array of strings, where each string represents a single argument
-  on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each
-  arg is treated as a [configuration
-  template](/docs/templates/configuration-templates.html), where the `Name`
-  variable is replaced with the VM name. More details on how to use `VBoxManage`
-  are below.
+- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to
+  execute in order to further customize the virtual machine being created. The
+  value of this is an array of commands to execute. The commands are executed
+  in the order defined in the template. For each command, the command is
+  defined itself as an array of strings, where each string represents a single
+  argument on the command-line to `VBoxManage` (but excluding
+  `VBoxManage` itself). Each arg is treated as a [configuration
+  template](/docs/templates/configuration-templates.html), where the `Name`
+  variable is replaced with the VM name. More details on how to use
+  `VBoxManage` are below.

-- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
-  except that it is run after the virtual machine is shutdown, and before the
-  virtual machine is exported.
+- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`,
+  except that it is run after the virtual machine is shut down, and before the
+  virtual machine is exported.

-- `virtualbox_version_file` (string) - The path within the virtual machine to
-  upload a file that contains the VirtualBox version that was used to create
-  the machine. This information can be useful for provisioning. By default this
-  is ".vbox\_version", which will generally be upload it into the
-  home directory.
+- `virtualbox_version_file` (string) - The path within the virtual machine to
+  upload a file that contains the VirtualBox version that was used to create
+  the machine. This information can be useful for provisioning. By default
+  this is ".vbox\_version", which will generally upload it into the
+  home directory.

-- `vm_name` (string) - This is the name of the virtual machine when it is
-  imported as well as the name of the OVF file when the virtual machine
-  is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
-  name of the build.
+- `vm_name` (string) - This is the name of the virtual machine when it is
+  imported as well as the name of the OVF file when the virtual machine
+  is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
+  name of the build.
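As a sketch of how these options fit together, the template below imports an
existing appliance and connects directly over SSH instead of through a NAT
mapping. The source path and credentials are placeholders:

```json
{
  "builders": [
    {
      "type": "virtualbox-ovf",
      "source_path": "source.ovf",
      "ssh_username": "packer",
      "ssh_port": 22,
      "ssh_skip_nat_mapping": true,
      "vboxmanage": [
        ["modifyvm", "{{.Name}}", "--memory", "1024"]
      ],
      "shutdown_command": "echo 'packer' | sudo -S shutdown -P now"
    }
  ]
}
```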
## Guest Additions diff --git a/website/source/docs/builders/virtualbox.html.markdown b/website/source/docs/builders/virtualbox.html.markdown index f96d37515..b2064f7d2 100644 --- a/website/source/docs/builders/virtualbox.html.markdown +++ b/website/source/docs/builders/virtualbox.html.markdown @@ -16,13 +16,14 @@ Packer actually comes with multiple builders able to create VirtualBox machines, depending on the strategy you want to use to build the image. Packer supports the following VirtualBox builders: -- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO - file, creates a brand new VirtualBox VM, installs an OS, provisions software - within the OS, then exports that machine to create an image. This is best for - people who want to start from scratch. +- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO + file, creates a brand new VirtualBox VM, installs an OS, provisions software + within the OS, then exports that machine to create an image. This is best + for people who want to start from scratch. -- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports an - existing OVF/OVA file, runs provisioners on top of that VM, and exports that - machine to create an image. This is best if you have an existing VirtualBox VM - export you want to use as the source. As an additional benefit, you can feed - the artifact of this builder back into itself to iterate on a machine. +- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports + an existing OVF/OVA file, runs provisioners on top of that VM, and exports + that machine to create an image. This is best if you have an existing + VirtualBox VM export you want to use as the source. As an additional + benefit, you can feed the artifact of this builder back into itself to + iterate on a machine. diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index ad2ac5c33..fba47f0ad 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -57,195 +57,199 @@ builder. ### Required: -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files - are so large, this is required and Packer will verify it prior to booting a - virtual machine with the ISO attached. The type of the checksum is specified - with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum is + specified with `iso_checksum_type`, documented below. -- `iso_checksum_type` (string) - The type of the checksum specified in - `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or - "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files are generally large and corruption does happen - from time to time. +- `iso_checksum_type` (string) - The type of the checksum specified in + `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or + "sha512" currently. While "none" will skip checksumming, this is not + recommended since ISO files are generally large and corruption does happen + from time to time. -- `iso_url` (string) - A URL to the ISO containing the installation image. This - URL can be either an HTTP URL or a file URL (or path to a file). 
If this is an - HTTP URL, Packer will download it and cache it between runs. +- `iso_url` (string) - A URL to the ISO containing the installation image. + This URL can be either an HTTP URL or a file URL (or path to a file). If + this is an HTTP URL, Packer will download it and cache it between runs. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `disk_additional_size` (array of integers) - The size(s) of any additional - hard disks for the VM in megabytes. If this is not specified then the VM will - only contain a primary hard disk. The builder uses expandable, not fixed-size - virtual hard disks, so the actual file representing the disk will not use the - full size unless it is full. +- `disk_additional_size` (array of integers) - The size(s) of any additional + hard disks for the VM in megabytes. If this is not specified then the VM + will only contain a primary hard disk. The builder uses expandable, not + fixed-size virtual hard disks, so the actual file representing the disk will + not use the full size unless it is full. -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. The - builder uses expandable, not fixed-size virtual hard disks, so the actual file - representing the disk will not use the full size unless it is full. By default - this is set to 40,000 (about 40 GB). +- `disk_size` (integer) - The size of the hard disk for the VM in megabytes. + The builder uses expandable, not fixed-size virtual hard disks, so the + actual file representing the disk will not use the full size unless it + is full. By default this is set to 40,000 (about 40 GB). -- `disk_type_id` (string) - The type of VMware virtual disk to create. The - default is "1", which corresponds to a growable virtual disk split in - 2GB files. This option is for advanced usage, modify only if you know what - you're doing. 
For more information, please consult the [Virtual Disk Manager - User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop - VMware clients. For ESXi, refer to the proper ESXi documentation. +- `disk_type_id` (string) - The type of VMware virtual disk to create. The + default is "1", which corresponds to a growable virtual disk split in + 2GB files. This option is for advanced usage, modify only if you know what + you're doing. For more information, please consult the [Virtual Disk Manager + User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop + VMware clients. For ESXi, refer to the proper ESXi documentation. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. Currently, no support exists for + creating sub-directories on the floppy. Wildcard characters (\*, ?, + and \[\]) are allowed. Directory names are also allowed, which will add all + the files found in the directory to the floppy. -- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is - "/Applications/VMware Fusion.app" but this setting allows you to - customize this. +- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is + "/Applications/VMware Fusion.app" but this setting allows you to + customize this. -- `guest_os_type` (string) - The guest OS type being installed. This will be set - in the VMware VMX. By default this is "other". By specifying a more specific - OS type, VMware may perform some optimizations or virtual hardware changes to - better support the operating system running in the virtual machine. +- `guest_os_type` (string) - The guest OS type being installed. This will be + set in the VMware VMX. By default this is "other". By specifying a more + specific OS type, VMware may perform some optimizations or virtual hardware + changes to better support the operating system running in the + virtual machine. -- `headless` (boolean) - Packer defaults to building VMware virtual machines by - launching a GUI that shows the console of the machine being built. When this - value is set to true, the machine will start without a console. For VMware - machines, Packer will output VNC connection information in case you need to - connect to the console to debug the build process. +- `headless` (boolean) - Packer defaults to building VMware virtual machines + by launching a GUI that shows the console of the machine being built. When + this value is set to true, the machine will start without a console. 
For
+  VMware machines, Packer will output VNC connection information in case you
+  need to connect to the console to debug the build process.

-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
-  The files in this directory will be available over HTTP that will be
-  requestable from the virtual machine. This is useful for hosting kickstart
-  files and so on. By default this is "", which means no HTTP server will
-  be started. The address and port of the HTTP server will be available as
-  variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+  HTTP server. The files in this directory will be available over HTTP and
+  can be requested from the virtual machine. This is useful for hosting
+  kickstart files and so on. By default this is "", which means no HTTP server
+  will be started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.

-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
-  maximum port to use for the HTTP server started to serve the `http_directory`.
-  Because Packer often runs in parallel, Packer will choose a randomly available
-  port in this range to run the HTTP server. If you want to force the HTTP
-  server to be on one port, make this minimum and maximum port the same. By
-  default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+  maximum port to use for the HTTP server started to serve the
+  `http_directory`. Because Packer often runs in parallel, Packer will choose
+  a randomly available port in this range to run the HTTP server. If you want
+  to force the HTTP server to be on one port, make this minimum and maximum
+  port the same. By default the values are 8000 and 9000, respectively.

-- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer
-  will try these in order. If anything goes wrong attempting to download or
-  while downloading a single URL, it will move on to the next. All URLs must
-  point to the same file (same checksum). By default this is empty and `iso_url`
-  is used. Only one of `iso_url` or `iso_urls` can be specified.
+- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
+  Packer will try these in order. If anything goes wrong attempting to
+  download or while downloading a single URL, it will move on to the next. All
+  URLs must point to the same file (same checksum). By default this is empty
+  and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.

-- `output_directory` (string) - This is the path to the directory where the
-  resulting virtual machine will be created. This may be relative or absolute.
-  If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running
-  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
-  name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+  resulting virtual machine will be created. This may be relative or absolute.
+  If relative, the path is relative to the working directory when `packer`
+  is executed. This directory must not exist or be empty prior to running
+  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+  name of the build.
-- `remote_cache_datastore` (string) - The path to the datastore where supporting - files will be stored during the build on the remote machine. By default this - is the same as the `remote_datastore` option. This only has an effect if - `remote_type` is enabled. +- `remote_cache_datastore` (string) - The path to the datastore where + supporting files will be stored during the build on the remote machine. By + default this is the same as the `remote_datastore` option. This only has an + effect if `remote_type` is enabled. -- `remote_cache_directory` (string) - The path where the ISO and/or floppy files - will be stored during the build on the remote machine. The path is relative to - the `remote_cache_datastore` on the remote machine. By default this - is "packer\_cache". This only has an effect if `remote_type` is enabled. +- `remote_cache_directory` (string) - The path where the ISO and/or floppy + files will be stored during the build on the remote machine. The path is + relative to the `remote_cache_datastore` on the remote machine. By default + this is "packer\_cache". This only has an effect if `remote_type` + is enabled. -- `remote_datastore` (string) - The path to the datastore where the resulting VM - will be stored when it is built on the remote machine. By default this - is "datastore1". This only has an effect if `remote_type` is enabled. +- `remote_datastore` (string) - The path to the datastore where the resulting + VM will be stored when it is built on the remote machine. By default this + is "datastore1". This only has an effect if `remote_type` is enabled. -- `remote_host` (string) - The host of the remote machine used for access. This - is only required if `remote_type` is enabled. +- `remote_host` (string) - The host of the remote machine used for access. + This is only required if `remote_type` is enabled. -- `remote_password` (string) - The SSH password for the user used to access the - remote machine. By default this is empty. This only has an effect if - `remote_type` is enabled. +- `remote_password` (string) - The SSH password for the user used to access + the remote machine. By default this is empty. This only has an effect if + `remote_type` is enabled. -- `remote_type` (string) - The type of remote machine that will be used to build - this VM rather than a local desktop product. The only value accepted for this - currently is "esx5". If this is not set, a desktop product will be used. By - default, this is not set. +- `remote_type` (string) - The type of remote machine that will be used to + build this VM rather than a local desktop product. The only value accepted + for this currently is "esx5". If this is not set, a desktop product will + be used. By default, this is not set. -- `remote_username` (string) - The username for the SSH user that will access - the remote machine. This is required if `remote_type` is enabled. +- `remote_username` (string) - The username for the SSH user that will access + the remote machine. This is required if `remote_type` is enabled. -- `shutdown_command` (string) - The command to use to gracefully shut down the - machine once all the provisioning is done. By default this is an empty string, - which tells Packer to just forcefully shut down the machine. +- `shutdown_command` (string) - The command to use to gracefully shut down the + machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine. 
-- `shutdown_timeout` (string) - The amount of time to wait after executing the - `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. +- `shutdown_timeout` (string) - The amount of time to wait after executing the + `shutdown_command` for the virtual machine to actually shut down. If it + doesn't shut down in this time, it is an error. By default, the timeout is + "5m", or five minutes. -- `skip_compaction` (boolean) - VMware-created disks are defragmented and - compacted at the end of the build process using `vmware-vdiskmanager`. In - certain rare cases, this might actually end up making the resulting disks - slightly larger. If you find this to be the case, you can disable compaction - using this configuration value. +- `skip_compaction` (boolean) - VMware-created disks are defragmented and + compacted at the end of the build process using `vmware-vdiskmanager`. In + certain rare cases, this might actually end up making the resulting disks + slightly larger. If you find this to be the case, you can disable compaction + using this configuration value. -- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to upload - into the VM. Valid values are "darwin", "linux", and "windows". By default, - this is empty, which means VMware tools won't be uploaded. +- `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to + upload into the VM. Valid values are "darwin", "linux", and "windows". By + default, this is empty, which means VMware tools won't be uploaded. -- `tools_upload_path` (string) - The path in the VM to upload the VMware tools. - This only takes effect if `tools_upload_flavor` is non-empty. This is a - [configuration template](/docs/templates/configuration-templates.html) that - has a single valid variable: `Flavor`, which will be the value of - `tools_upload_flavor`. By default the upload path is set to `{{.Flavor}}.iso`. - This setting is not used when `remote_type` is "esx5". +- `tools_upload_path` (string) - The path in the VM to upload the + VMware tools. This only takes effect if `tools_upload_flavor` is non-empty. + This is a [configuration + template](/docs/templates/configuration-templates.html) that has a single + valid variable: `Flavor`, which will be the value of `tools_upload_flavor`. + By default the upload path is set to `{{.Flavor}}.iso`. This setting is not + used when `remote_type` is "esx5". -- `version` (string) - The [vmx hardware - version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) - for the new virtual machine. Only the default value has been tested, any other - value is experimental. Default value is '9'. +- `version` (string) - The [vmx hardware + version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746) + for the new virtual machine. Only the default value has been tested, any + other value is experimental. Default value is '9'. -- `vm_name` (string) - This is the name of the VMX file for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the VMX file for the new virtual + machine, without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. 
-- `vmdk_name` (string) - The filename of the virtual disk that'll be created,
-  without the extension. This defaults to "packer".
+- `vmdk_name` (string) - The filename of the virtual disk that'll be created,
+  without the extension. This defaults to "packer".

-- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into
-  the virtual machine VMX file. This is for advanced users who want to set
-  properties such as memory, CPU, etc.
+- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter
+  into the virtual machine VMX file. This is for advanced users who want to
+  set properties such as memory, CPU, etc.

-- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
-  except that it is run after the virtual machine is shutdown, and before the
-  virtual machine is exported.
+- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
+  except that it is run after the virtual machine is shut down, and before the
+  virtual machine is exported.

-- `vmx_template_path` (string) - Path to a [configuration
-  template](/docs/templates/configuration-templates.html) that defines the
-  contents of the virtual machine VMX file for VMware. This is for **advanced
-  users only** as this can render the virtual machine non-functional. See below
-  for more information. For basic VMX modifications, try `vmx_data` first.
+- `vmx_template_path` (string) - Path to a [configuration
+  template](/docs/templates/configuration-templates.html) that defines the
+  contents of the virtual machine VMX file for VMware. This is for **advanced
+  users only** as this can render the virtual machine non-functional. See
+  below for more information. For basic VMX modifications, try
+  `vmx_data` first.

-- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
-  use for VNC access to the virtual machine. The builder uses VNC to type the
-  initial `boot_command`. Because Packer generally runs in parallel, Packer uses
-  a randomly chosen port in this range that appears available. By default this
-  is 5900 to 6000. The minimum and maximum ports are inclusive.
+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
+  to use for VNC access to the virtual machine. The builder uses VNC to type
+  the initial `boot_command`. Because Packer generally runs in parallel,
+  Packer uses a randomly chosen port in this range that appears available. By
+  default this is 5900 to 6000. The minimum and maximum ports are inclusive.

## Boot Command

@@ -263,40 +267,40 @@ machine, simulating a human actually typing the keyboard.

There are a set of special keys available. If these are in your boot command,
they will be replaced by the proper key:

-- `<bs>` - Backspace
+- `<bs>` - Backspace

-- `<del>` - Delete
+- `<del>` - Delete

-- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
+- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.

-- `<esc>` - Simulates pressing the escape key.
+- `<esc>` - Simulates pressing the escape key.

-- `<tab>` - Simulates pressing the tab key.
+- `<tab>` - Simulates pressing the tab key.

-- `<f1>` - `<f12>` - Simulates pressing a function key.
+- `<f1>` - `<f12>` - Simulates pressing a function key.

-- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
+- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.

-- `<spacebar>` - Simulates pressing the spacebar.
+- `<spacebar>` - Simulates pressing the spacebar.

-- `<insert>` - Simulates pressing the insert key.
+- `<insert>` - Simulates pressing the insert key.

-- `<home>` `<end>` - Simulates pressing the home and end keys.
+- `<home>` `<end>` - Simulates pressing the home and end keys.

-- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
+- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.

-- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending
-  any additional keys. This is useful if you have to generally wait for the UI
-  to update before typing more.
+- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
+  sending any additional keys. This is useful if you have to generally wait
+  for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:

-- `HTTPIP` and `HTTPPort` - The IP and port, respectively of an HTTP server that
-  is started serving the directory specified by the `http_directory`
-  configuration parameter. If `http_directory` isn't specified, these will be
-  blank!
+- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
+  that is started serving the directory specified by the `http_directory`
+  configuration parameter. If `http_directory` isn't specified, these will be
+  blank!

Example boot command. This is actually a working boot command used to start an
Ubuntu 12.04 installer:

@@ -332,12 +336,12 @@ Within the template, a handful of variables are available so that your template
can continue working with the rest of the Packer machinery. Using these
variables isn't required, however.

-- `Name` - The name of the virtual machine.
-- `GuestOS` - The VMware-valid guest OS type.
-- `DiskName` - The filename (without the suffix) of the main virtual disk.
-- `ISOPath` - The path to the ISO to use for the OS installation.
-- `Version` - The Hardware version VMWare will execute this vm under. Also known
-  as the `virtualhw.version`.
+- `Name` - The name of the virtual machine.
+- `GuestOS` - The VMware-valid guest OS type.
+- `DiskName` - The filename (without the suffix) of the main virtual disk.
+- `ISOPath` - The path to the ISO to use for the OS installation.
+- `Version` - The hardware version VMware will execute this VM under. Also
+  known as the `virtualhw.version`.

## Building on a Remote vSphere Hypervisor

@@ -367,23 +371,23 @@ connections.

To use a remote VMware vSphere Hypervisor to build your virtual machine, fill
in the required `remote_*` configurations:

-- `remote_type` - This must be set to "esx5".
+- `remote_type` - This must be set to "esx5".

-- `remote_host` - The host of the remote machine.
+- `remote_host` - The host of the remote machine.

Additionally, there are some optional configurations that you'll likely have to
modify as well:

-- `remote_datastore` - The path to the datastore where the VM will be stored on
-  the ESXi machine.
+- `remote_datastore` - The path to the datastore where the VM will be stored
+  on the ESXi machine.

-- `remote_cache_datastore` - The path to the datastore where supporting files
-  will be stored during the build on the remote machine.
+- `remote_cache_datastore` - The path to the datastore where supporting files
+  will be stored during the build on the remote machine.

-- `remote_cache_directory` - The path where the ISO and/or floppy files will be
-  stored during the build on the remote machine. The path is relative to the
-  `remote_cache_datastore` on the remote machine.
-- `remote_username` - The SSH username used to access the remote machine. +- `remote_username` - The SSH username used to access the remote machine. -- `remote_password` - The SSH password for access to the remote machine. +- `remote_password` - The SSH password for access to the remote machine. diff --git a/website/source/docs/builders/vmware-vmx.html.markdown b/website/source/docs/builders/vmware-vmx.html.markdown index bd1afb83c..da3a418b3 100644 --- a/website/source/docs/builders/vmware-vmx.html.markdown +++ b/website/source/docs/builders/vmware-vmx.html.markdown @@ -53,99 +53,100 @@ builder. ### Required: -- `source_path` (string) - Path to the source VMX file to clone. +- `source_path` (string) - Path to the source VMX file to clone. -- `ssh_username` (string) - The username to use to SSH into the machine once the - OS is installed. +- `ssh_username` (string) - The username to use to SSH into the machine once + the OS is installed. ### Optional: -- `boot_command` (array of strings) - This is an array of commands to type when - the virtual machine is first booted. The goal of these commands should be to - type just enough to initialize the operating system installer. Special keys - can be typed as well, and are covered in the section below on the - boot command. If this is not specified, it is assumed the installer will - start itself. +- `boot_command` (array of strings) - This is an array of commands to type + when the virtual machine is first booted. The goal of these commands should + be to type just enough to initialize the operating system installer. Special + keys can be typed as well, and are covered in the section below on the + boot command. If this is not specified, it is assumed the installer will + start itself. -- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait five - seconds and one minute 30 seconds, respectively. If this isn't specified, the - default is 10 seconds. +- `boot_wait` (string) - The time to wait after booting the initial virtual + machine before typing the `boot_command`. The value of this should be + a duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful for - unattended Windows installs, which look for an `Autounattend.xml` file on - removable media. By default, no floppy will be attached. All files listed in - this setting get placed into the root directory of the floppy and the floppy - is attached as the first floppy device. Currently, no support exists for - creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\]) - are allowed. Directory names are also allowed, which will add all the files - found in the directory to the floppy. +- `floppy_files` (array of strings) - A list of files to place onto a floppy + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed in + this setting get placed into the root directory of the floppy and the floppy + is attached as the first floppy device. 
Currently, no support exists for
-  creating sub-directories on the floppy. Wildcard characters (\*, ?, and \[\])
-  are allowed. Directory names are also allowed, which will add all the files
-  found in the directory to the floppy.
+- `floppy_files` (array of strings) - A list of files to place onto a floppy
+  disk that is attached when the VM is booted. This is most useful for
+  unattended Windows installs, which look for an `Autounattend.xml` file on
+  removable media. By default, no floppy will be attached. All files listed in
+  this setting get placed into the root directory of the floppy and the floppy
+  is attached as the first floppy device. Currently, no support exists for
+  creating sub-directories on the floppy. Wildcard characters (\*, ?,
+  and \[\]) are allowed. Directory names are also allowed, which will add all
+  the files found in the directory to the floppy.

-- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is
-  "/Applications/VMware Fusion.app" but this setting allows you to
-  customize this.
+- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is
+  "/Applications/VMware Fusion.app" but this setting allows you to
+  customize this.

-- `headless` (boolean) - Packer defaults to building VMware virtual machines by
-  launching a GUI that shows the console of the machine being built. When this
-  value is set to true, the machine will start without a console. For VMware
-  machines, Packer will output VNC connection information in case you need to
-  connect to the console to debug the build process.
+- `headless` (boolean) - Packer defaults to building VMware virtual machines
+  by launching a GUI that shows the console of the machine being built. When
+  this value is set to true, the machine will start without a console. For
+  VMware machines, Packer will output VNC connection information in case you
+  need to connect to the console to debug the build process.

-- `http_directory` (string) - Path to a directory to serve using an HTTP server.
-  The files in this directory will be available over HTTP that will be
-  requestable from the virtual machine. This is useful for hosting kickstart
-  files and so on. By default this is "", which means no HTTP server will
-  be started. The address and port of the HTTP server will be available as
-  variables in `boot_command`. This is covered in more detail below.
+- `http_directory` (string) - Path to a directory to serve using an
+  HTTP server. The files in this directory will be available over HTTP and
+  can be requested from the virtual machine. This is useful for hosting
+  kickstart files and so on. By default this is "", which means no HTTP server
+  will be started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.

-- `http_port_min` and `http_port_max` (integer) - These are the minimum and
-  maximum port to use for the HTTP server started to serve the `http_directory`.
-  Because Packer often runs in parallel, Packer will choose a randomly available
-  port in this range to run the HTTP server. If you want to force the HTTP
-  server to be on one port, make this minimum and maximum port the same. By
-  default the values are 8000 and 9000, respectively.
+- `http_port_min` and `http_port_max` (integer) - These are the minimum and
+  maximum port to use for the HTTP server started to serve the
+  `http_directory`. Because Packer often runs in parallel, Packer will choose
+  a randomly available port in this range to run the HTTP server. If you want
+  to force the HTTP server to be on one port, make this minimum and maximum
+  port the same. By default the values are 8000 and 9000, respectively.

-- `output_directory` (string) - This is the path to the directory where the
-  resulting virtual machine will be created. This may be relative or absolute.
-  If relative, the path is relative to the working directory when `packer`
-  is executed. This directory must not exist or be empty prior to running
-  the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
-  name of the build.
+- `output_directory` (string) - This is the path to the directory where the
+ resulting virtual machine will be created. This may be relative or absolute.
+ If relative, the path is relative to the working directory when `packer`
+ is executed. This directory must not exist or be empty prior to running
+ the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
+ name of the build.

-- `shutdown_command` (string) - The command to use to gracefully shut down the
- machine once all the provisioning is done. By default this is an empty string,
- which tells Packer to just forcefully shut down the machine unless a shutdown
- command takes place inside script so this may safely be omitted. If one or
- more scripts require a reboot it is suggested to leave this blank since
- reboots may fail and specify the final shutdown command in your last script.

+- `shutdown_command` (string) - The command to use to gracefully shut down the
+ machine once all the provisioning is done. By default this is an empty
+ string, which tells Packer to just forcefully shut down the machine unless a
+ shutdown command takes place inside a script, in which case this may safely
+ be omitted. If one or more scripts require a reboot, it is suggested to
+ leave this blank, since reboots may fail, and to specify the final shutdown
+ command in your last script.

-- `shutdown_timeout` (string) - The amount of time to wait after executing the
- `shutdown_command` for the virtual machine to actually shut down. If it
- doesn't shut down in this time, it is an error. By default, the timeout is
- "5m", or five minutes.

+- `shutdown_timeout` (string) - The amount of time to wait after executing the
+ `shutdown_command` for the virtual machine to actually shut down. If it
+ doesn't shut down in this time, it is an error. By default, the timeout is
+ "5m", or five minutes.

-- `skip_compaction` (boolean) - VMware-created disks are defragmented and
- compacted at the end of the build process using `vmware-vdiskmanager`. In
- certain rare cases, this might actually end up making the resulting disks
- slightly larger. If you find this to be the case, you can disable compaction
- using this configuration value.

+- `skip_compaction` (boolean) - VMware-created disks are defragmented and
+ compacted at the end of the build process using `vmware-vdiskmanager`. In
+ certain rare cases, this might actually end up making the resulting disks
+ slightly larger. If you find this to be the case, you can disable compaction
+ using this configuration value.

-- `vm_name` (string) - This is the name of the VMX file for the new virtual
- machine, without the file extension. By default this is "packer-BUILDNAME",
- where "BUILDNAME" is the name of the build.

+- `vm_name` (string) - This is the name of the VMX file for the new virtual
+ machine, without the file extension. By default this is "packer-BUILDNAME",
+ where "BUILDNAME" is the name of the build.

-- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter into
- the virtual machine VMX file. This is for advanced users who want to set
- properties such as memory, CPU, etc.

+- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter
+ into the virtual machine VMX file. This is for advanced users who want to
+ set properties such as memory, CPU, etc.

-- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
- except that it is run after the virtual machine is shutdown, and before the
- virtual machine is exported.
+- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
+ except that it is run after the virtual machine is shut down, and before the
+ virtual machine is exported.

-- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to
- use for VNC access to the virtual machine. The builder uses VNC to type the
- initial `boot_command`. Because Packer generally runs in parallel, Packer uses
- a randomly chosen port in this range that appears available. By default this
- is 5900 to 6000. The minimum and maximum ports are inclusive.

+- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
+ to use for VNC access to the virtual machine. The builder uses VNC to type
+ the initial `boot_command`. Because Packer generally runs in parallel,
+ Packer uses a randomly chosen port in this range that appears available. By
+ default this is 5900 to 6000. The minimum and maximum ports are inclusive.

diff --git a/website/source/docs/builders/vmware.html.markdown b/website/source/docs/builders/vmware.html.markdown
index e77fe574a..e8486ca4c 100644
--- a/website/source/docs/builders/vmware.html.markdown
+++ b/website/source/docs/builders/vmware.html.markdown
@@ -15,14 +15,14 @@ Packer actually comes with multiple builders able to create VMware machines,
depending on the strategy you want to use to build the image. Packer supports
the following VMware builders:

-- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file,
- creates a brand new VMware VM, installs an OS, provisions software within the
- OS, then exports that machine to create an image. This is best for people who
- want to start from scratch.

+- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file,
+ creates a brand new VMware VM, installs an OS, provisions software within
+ the OS, then exports that machine to create an image. This is best for
+ people who want to start from scratch.

-- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an
- existing VMware machine (from a VMX file), runs provisioners on top of that
- VM, and exports that machine to create an image. This is best if you have an
- existing VMware VM you want to use as the source. As an additional benefit,
- you can feed the artifact of this builder back into Packer to iterate on
- a machine.

+- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an
+ existing VMware machine (from a VMX file), runs provisioners on top of that
+ VM, and exports that machine to create an image. This is best if you have an
+ existing VMware VM you want to use as the source. As an additional benefit,
+ you can feed the artifact of this builder back into Packer to iterate on
+ a machine.

diff --git a/website/source/docs/command-line/build.html.markdown b/website/source/docs/command-line/build.html.markdown
index 92afda570..ba4421293 100644
--- a/website/source/docs/command-line/build.html.markdown
+++ b/website/source/docs/command-line/build.html.markdown
@@ -17,24 +17,26 @@ artifacts that are created will be outputted at the end of the build.

## Options

-- `-color=false` - Disables colorized output. Enabled by default.

+- `-color=false` - Disables colorized output. Enabled by default.

-- `-debug` - Disables parallelization and enables debug mode. Debug mode flags
- the builders that they should output debugging information. The exact behavior
- of debug mode is left to the builder.
- In general, builders usually will stop between each step, waiting for
- keyboard input before continuing. This will allow the user to inspect state
- and so on.

+- `-debug` - Disables parallelization and enables debug mode. Debug mode flags
+ the builders that they should output debugging information. The exact
+ behavior of debug mode is left to the builder. In general, builders usually
+ will stop between each step, waiting for keyboard input before continuing.
+ This will allow the user to inspect state and so on.

-- `-except=foo,bar,baz` - Builds all the builds except those with the given
- comma-separated names. Build names by default are the names of their builders,
- unless a specific `name` attribute is specified within the configuration.

+- `-except=foo,bar,baz` - Builds all the builds except those with the given
+ comma-separated names. Build names by default are the names of their
+ builders, unless a specific `name` attribute is specified within
+ the configuration.

-- `-force` - Forces a builder to run when artifacts from a previous build
- prevent a build from running. The exact behavior of a forced build is left to
- the builder. In general, a builder supporting the forced build will remove the
- artifacts from the previous build. This will allow the user to repeat a build
- without having to manually clean these artifacts beforehand.

+- `-force` - Forces a builder to run when artifacts from a previous build
+ prevent a build from running. The exact behavior of a forced build is left
+ to the builder. In general, a builder supporting the forced build will
+ remove the artifacts from the previous build. This will allow the user to
+ repeat a build without having to manually clean these artifacts beforehand.

-- `-only=foo,bar,baz` - Only build the builds with the given
- comma-separated names. Build names by default are the names of their builders,
- unless a specific `name` attribute is specified within the configuration.

+- `-only=foo,bar,baz` - Only build the builds with the given
+ comma-separated names. Build names by default are the names of their
+ builders, unless a specific `name` attribute is specified within
+ the configuration.

diff --git a/website/source/docs/command-line/fix.html.markdown b/website/source/docs/command-line/fix.html.markdown
index eb383fec6..ec18b69bc 100644
--- a/website/source/docs/command-line/fix.html.markdown
+++ b/website/source/docs/command-line/fix.html.markdown
@@ -19,7 +19,7 @@ The fix command will output the changed template to standard out, so you should
redirect standard out using standard OS-specific techniques if you want to
save it to a file. For example, on Linux systems, you may want to do this:

-    $ packer fix old.json > new.json
+    $ packer fix old.json > new.json

If fixing fails for any reason, the fix command will exit with a non-zero exit
status. Error messages appear on standard error, so if you're redirecting
diff --git a/website/source/docs/command-line/machine-readable.html.markdown b/website/source/docs/command-line/machine-readable.html.markdown
index 550a14f35..fa9fe3cac 100644
--- a/website/source/docs/command-line/machine-readable.html.markdown
+++ b/website/source/docs/command-line/machine-readable.html.markdown
@@ -53,20 +53,22 @@ timestamp,target,type,data...

Each component is explained below:

-- **timestamp** is a Unix timestamp in UTC of when the message was printed.

+- **timestamp** is a Unix timestamp in UTC of when the message was printed.

-- **target** is the target of the following output.
- This is empty if the message is related to Packer globally. Otherwise, this
- is generally a build name so you can relate output to a specific build while
- parallel builds are running.

+- **target** is the target of the following output. This is empty if the
+ message is related to Packer globally. Otherwise, this is generally a build
+ name so you can relate output to a specific build while parallel builds
+ are running.

-- **type** is the type of machine-readable message being outputted. There are a
- set of standard types which are covered later, but each component of Packer
- (builders, provisioners, etc.) may output their own custom types as well,
- allowing the machine-readable output to be infinitely flexible.

+- **type** is the type of machine-readable message being outputted. There are
+ a set of standard types which are covered later, but each component of
+ Packer (builders, provisioners, etc.) may output their own custom types as
+ well, allowing the machine-readable output to be infinitely flexible.

-- **data** is zero or more comma-seperated values associated with the
- prior type. The exact amount and meaning of this data is type-dependent, so
- you must read the documentation associated with the type to understand fully.

+- **data** is zero or more comma-separated values associated with the
+ prior type. The exact amount and meaning of this data is type-dependent, so
+ you must read the documentation associated with the type to
+ understand fully.

Within the format, if data contains a comma, it is replaced with
`%!(PACKER_COMMA)`. This was preferred over an escape character such as `\'`
diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown
index 0cc9699f5..764333967 100644
--- a/website/source/docs/command-line/push.html.markdown
+++ b/website/source/docs/command-line/push.html.markdown
@@ -26,16 +26,16 @@ configuration](/docs/templates/push.html) must be completed within the
template.

## Options

-- `-message` - A message to identify the purpose or changes in this Packer
- template much like a VCS commit message. This message will be passed to the
- Packer build service. This option is also available as a short option `-m`.

+- `-message` - A message to identify the purpose or changes in this Packer
+ template much like a VCS commit message. This message will be passed to the
+ Packer build service. This option is also available as a short option `-m`.

-- `-token` - An access token for authenticating the push to the Packer build
- service such as Atlas. This can also be specified within the push
- configuration in the template.

+- `-token` - An access token for authenticating the push to the Packer build
+ service such as Atlas. This can also be specified within the push
+ configuration in the template.

-- `-name` - The name of the build in the service. This typically looks like
- `hashicorp/precise64`.

+- `-name` - The name of the build in the service. This typically looks like
+ `hashicorp/precise64`.

## Examples
diff --git a/website/source/docs/command-line/validate.html.markdown b/website/source/docs/command-line/validate.html.markdown
index e17f23dc4..c49e6587d 100644
--- a/website/source/docs/command-line/validate.html.markdown
+++ b/website/source/docs/command-line/validate.html.markdown
@@ -29,5 +29,5 @@ Errors validating build 'vmware'. 1 error(s) occurred:

## Options

-- `-syntax-only` - Only the syntax of the template is checked. The configuration
- is not validated.
+- `-syntax-only` - Only the syntax of the template is checked. The
+ configuration is not validated.

diff --git a/website/source/docs/extend/developing-plugins.html.markdown b/website/source/docs/extend/developing-plugins.html.markdown
index 0d86df3d2..8af8a241d 100644
--- a/website/source/docs/extend/developing-plugins.html.markdown
+++ b/website/source/docs/extend/developing-plugins.html.markdown
@@ -52,19 +52,19 @@ the following two packages, you're encouraged to use whatever packages you
want. Because plugins are their own processes, there is no danger of colliding
dependencies.

-- `github.com/mitchellh/packer` - Contains all the interfaces that you have to
- implement for any given plugin.

+- `github.com/mitchellh/packer` - Contains all the interfaces that you have to
+ implement for any given plugin.

-- `github.com/mitchellh/packer/plugin` - Contains the code to serve the plugin.
- This handles all the inter-process communication stuff.

+- `github.com/mitchellh/packer/plugin` - Contains the code to serve
+ the plugin. This handles all the inter-process communication stuff.

There are two steps involved in creating a plugin:

-1. Implement the desired interface. For example, if you're building a builder
- plugin, implement the `packer.Builder` interface.

+1. Implement the desired interface. For example, if you're building a builder
+ plugin, implement the `packer.Builder` interface.

-2. Serve the interface by calling the appropriate plugin serving method in your
- main method. In the case of a builder, this is `plugin.ServeBuilder`.

+2. Serve the interface by calling the appropriate plugin serving method in your
+ main method. In the case of a builder, this is `plugin.ServeBuilder`.

A basic example is shown below. In this example, assume the `Builder` struct
implements the `packer.Builder` interface:
diff --git a/website/source/docs/extend/plugins.html.markdown b/website/source/docs/extend/plugins.html.markdown
index 98249de5d..9f18ca138 100644
--- a/website/source/docs/extend/plugins.html.markdown
+++ b/website/source/docs/extend/plugins.html.markdown
@@ -51,21 +51,21 @@ Once the plugin is named properly, Packer automatically discovers plugins in
the following directories in the given order. If a conflicting plugin is found
later, it will take precedence over one found earlier.

-1. The directory where `packer` is, or the executable directory.

+1. The directory where `packer` is, or the executable directory.

-2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins`
- on Windows.

+2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins`
+ on Windows.

-3. The current working directory.

+3. The current working directory.

The valid types for plugins are:

-- `builder` - Plugins responsible for building images for a specific platform.

+- `builder` - Plugins responsible for building images for a specific platform.

-- `command` - A CLI sub-command for `packer`.

+- `command` - A CLI sub-command for `packer`.

-- `post-processor` - A post-processor responsible for taking an artifact from a
- builder and turning it into something else.

+- `post-processor` - A post-processor responsible for taking an artifact from
+ a builder and turning it into something else.

-- `provisioner` - A provisioner to install software on images created by
- a builder.

+- `provisioner` - A provisioner to install software on images created by
+ a builder.
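As a rough illustration of how these plugin types tie into installation, a core
configuration entry mapping a plugin name to its binary might look like the
sketch below; the name and path are hypothetical, and the authoritative format
lives on the core configuration page referenced above:

``` {.text}
{
  "builders": {
    "custom": "/path/to/packer-builder-custom"
  }
}
```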
diff --git a/website/source/docs/extend/post-processor.html.markdown b/website/source/docs/extend/post-processor.html.markdown
index 1120bc31d..9067e19d8 100644
--- a/website/source/docs/extend/post-processor.html.markdown
+++ b/website/source/docs/extend/post-processor.html.markdown
@@ -79,11 +79,11 @@ creating a new artifact with a single file: the compressed archive. The result
signature of this method is `(Artifact, bool, error)`. Each return value is
explained below:

-- `Artifact` - The newly created artifact if no errors occurred.
-- `bool` - If true, the input artifact will forcefully be kept. By default,
- Packer typically deletes all input artifacts, since the user doesn't generally
- want intermediary artifacts. However, some post-processors depend on the
- previous artifact existing. If this is `true`, it forces packer to keep the
- artifact around.
-- `error` - Non-nil if there was an error in any way. If this is the case, the
- other two return values are ignored.

+- `Artifact` - The newly created artifact if no errors occurred.
+- `bool` - If true, the input artifact will forcefully be kept. By default,
+ Packer typically deletes all input artifacts, since the user doesn't
+ generally want intermediary artifacts. However, some post-processors depend
+ on the previous artifact existing. If this is `true`, it forces packer to
+ keep the artifact around.
+- `error` - Non-nil if there was an error in any way. If this is the case, the
+ other two return values are ignored.

diff --git a/website/source/docs/machine-readable/command-build.html.markdown b/website/source/docs/machine-readable/command-build.html.markdown
index 7472b7bfc..7b7b27993 100644
--- a/website/source/docs/machine-readable/command-build.html.markdown
+++ b/website/source/docs/machine-readable/command-build.html.markdown
@@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of
`packer build`.
    -
    artifact (>= 2)
    -
    +
    +artifact (>= 2)
    +
    +

    Information about an artifact of the targeted item. This is a
    fairly complex (but uniform!) machine-readable type that contains
@@ -37,10 +39,12 @@ These are the machine-readable types that exist as part of the output of
    data points related to the subtype. The exact count and meaning
    of these subtypes comes from the subtype documentation.

    -
    -
    artifact-count (1)
    -
    +
    +
    +artifact-count (1)
    +
    +

    The number of artifacts associated with the given target. This will always be outputted _before_ any other artifact information, @@ -51,10 +55,12 @@ These are the machine-readable types that exist as part of the output of Data 1: count - The number of artifacts as a base 10 integer.

    -
    -
    artifact subtype: builder-id (1)
    -
    +
    +
    +artifact subtype: builder-id (1)
    +
    +

    The unique ID of the builder that created this artifact.

    @@ -62,19 +68,23 @@ These are the machine-readable types that exist as part of the output of

    Data 1: id - The unique ID of the builder.

    -
    -
    artifact subtype: end (0)
    -
    +
    +
    +artifact subtype: end (0)
    +
    +

    The last machine-readable output line outputted for an artifact.
    This is a sentinel value so you know that no more data related to
    the targeted artifact will be outputted.

    -
    -
    artifact subtype: file (2)
    -
    +
    +
    +artifact subtype: file (2)
    +
    +

    A single file associated with the artifact. There are 0 to "files-count" of these entries to describe every file that is @@ -89,10 +99,12 @@ These are the machine-readable types that exist as part of the output of

    Data 2: filename - The filename.

    -
    -
    artifact subtype: files-count (1)
    -
    +
    +
    +artifact subtype: files-count (1)
    +
    +

    The number of files associated with this artifact. Not all artifacts have files associated with it. @@ -101,10 +113,12 @@ These are the machine-readable types that exist as part of the output of

    Data 1: count - The number of files.

    -
    -
    artifact subtype: id (1)
    -
    +
    +
    +artifact subtype: id (1)
    +
    +

    The ID (if any) of the artifact that was built. Not all artifacts have associated IDs. For example, AMIs built have IDs associated @@ -115,18 +129,22 @@ These are the machine-readable types that exist as part of the output of

    Data 1: id - The ID of the artifact.

    -
    -
    artifact subtype: nil (0)
    -
    +
    +
    +artifact subtype: nil (0)
    +
    +

    If present, this means that the artifact was nil, or that the targeted build completed successfully but no artifact was created.

    -
    -
    artifact subtype: string (1)
    -
    +
    +
    +artifact subtype: string (1)
    +
    +

    The human-readable string description of the artifact provided by the artifact itself. @@ -135,10 +153,12 @@ These are the machine-readable types that exist as part of the output of

    Data 1: string - The string output for the artifact.

    -
    -
    error-count (1)
    -
    +
    +
    +error-count (1)
    +
    +

    The number of errors that occurred during the build. This will always be outputted before any errors so you know how many are coming. @@ -148,10 +168,12 @@ These are the machine-readable types that exist as part of the output of Data 1: count - The number of build errors as a base 10 integer.

    -
    -
    error (1)
    -
    +
    +
    +error (1)
    +
    +

    A build error that occurred. The target of this output will be the build that had the error. @@ -160,6 +182,6 @@ These are the machine-readable types that exist as part of the output of

    Data 1: error - The error message as a string.

    -
    +
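To make the build types above concrete, here is a hedged sketch of a few
machine-readable lines from a single build; the timestamps, build name, and
builder ID are illustrative only:

``` {.text}
1440000000,,ui,say,Build 'virtualbox-iso' finished.
1440000000,virtualbox-iso,artifact-count,1
1440000000,virtualbox-iso,artifact,0,builder-id,mitchellh.virtualbox
1440000000,virtualbox-iso,artifact,0,end
```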
diff --git a/website/source/docs/machine-readable/command-inspect.html.markdown b/website/source/docs/machine-readable/command-inspect.html.markdown
index 4a5d68876..a75b892f3 100644
--- a/website/source/docs/machine-readable/command-inspect.html.markdown
+++ b/website/source/docs/machine-readable/command-inspect.html.markdown
@@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of
`packer inspect`.
    -
    template-variable (3)
    -
    +
    +template-variable (3)
    +
    +

    A user variable defined within the template. @@ -32,10 +34,12 @@ These are the machine-readable types that exist as part of the output of Data 3: required - If non-zero, then this variable is required.

    -
    -
    template-builder (2)
    -
    +
    +
    +template-builder (2)
    +
    +

    A builder defined within the template.

    @@ -48,10 +52,12 @@ These are the machine-readable types that exist as part of the output of generally be the same as the name unless you explicitly override the name.

    -
    -
    template-provisioner (1)
    -
    +
    +
    +template-provisioner (1)
    +
    +

    A provisioner defined within the template. Multiple of these may exist. If so, they are outputted in the order they would run. @@ -60,6 +66,6 @@ These are the machine-readable types that exist as part of the output of

    Data 1: name - The name/type of the provisioner.

    -
    +
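For orientation, a sketch of what machine-readable `packer inspect` output
might look like for a small template; the variable, builder, and provisioner
names here are hypothetical:

``` {.text}
1440000000,,template-variable,aws_access_key,,0
1440000000,,template-builder,amazon-ebs,amazon-ebs
1440000000,,template-provisioner,shell
```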
diff --git a/website/source/docs/machine-readable/command-version.html.markdown b/website/source/docs/machine-readable/command-version.html.markdown
index 8b32b2540..4d7be6d23 100644
--- a/website/source/docs/machine-readable/command-version.html.markdown
+++ b/website/source/docs/machine-readable/command-version.html.markdown
@@ -12,8 +12,10 @@ These are the machine-readable types that exist as part of the output of
`packer version`.
    -
    version (1)
    -
    +
    +version (1)
    +
    +

    The version number of Packer running.

    @@ -21,19 +23,23 @@ These are the machine-readable types that exist as part of the output of only including the major, minor, and patch versions. Example: "0.2.4".

    -
    -
    version-commit (1)
    -
    +
    +
    +version-commit (1)
    +
    +

    The SHA1 of the Git commit that built this version of Packer.

    Data 1: commit SHA1 - The SHA1 of the commit.

    -
    -
    version-prerelease (1)
    -
    +
    +
    +version-prerelease (1)
    +
    +

    The prerelease tag (if any) for the running version of Packer. This can be "beta", "dev", "alpha", etc. If this is empty, you can assume @@ -44,6 +50,6 @@ These are the machine-readable types that exist as part of the output of Data 1: prerelease name - The name of the prerelease tag.

    -
    +
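As a sketch, the machine-readable output of `packer version` might look like
the following; the version and prerelease values reuse the examples above, the
timestamp and commit SHA are placeholders:

``` {.text}
1440000000,,version,0.2.4
1440000000,,version-prerelease,beta
1440000000,,version-commit,0000000000000000000000000000000000000000
```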
diff --git a/website/source/docs/machine-readable/general.html.markdown b/website/source/docs/machine-readable/general.html.markdown
index b29ae053f..721406d7a 100644
--- a/website/source/docs/machine-readable/general.html.markdown
+++ b/website/source/docs/machine-readable/general.html.markdown
@@ -12,8 +12,10 @@ These are the machine-readable types that can appear in almost any
machine-readable output and are provided by Packer core itself.
    -
    ui (2)
    -
    +
    +ui (2)
    +
    +

    Specifies the output and type of output that would've normally gone to the console if Packer were running in human-readable @@ -28,6 +30,6 @@ machine-readable output and are provided by Packer core itself. Data 2: output - The UI message that would have been outputted.

    -
    +
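A minimal sketch of a `ui` line, assuming the "say" message type; the timestamp
and message text are illustrative:

``` {.text}
1440000000,,ui,say,Build 'example' finished.
```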
diff --git a/website/source/docs/machine-readable/index.html.markdown b/website/source/docs/machine-readable/index.html.markdown
index 161bda001..cde344947 100644
--- a/website/source/docs/machine-readable/index.html.markdown
+++ b/website/source/docs/machine-readable/index.html.markdown
@@ -24,12 +24,14 @@ Within each section, the format of the documentation is the following:
    -
    type-name (data-count)
    -
    +
    +type-name (data-count)
    +
    +

    Description of the type.

    Data 1: name - Description.

    -
    +
diff --git a/website/source/docs/other/core-configuration.html.markdown b/website/source/docs/other/core-configuration.html.markdown
index db1f75ab7..a112801e8 100644
--- a/website/source/docs/other/core-configuration.html.markdown
+++ b/website/source/docs/other/core-configuration.html.markdown
@@ -32,13 +32,13 @@ The format of the configuration file is basic JSON.

Below is the list of all available configuration parameters for the core
configuration file. None of these are required, since all have sane defaults.

-- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum and
- maximum ports that Packer uses for communication with plugins, since plugin
- communication happens over TCP connections on your local host. By default
- these are 10,000 and 25,000, respectively. Be sure to set a fairly wide range
- here, since Packer can easily use over 25 ports on a single run.

+- `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum
+ and maximum ports that Packer uses for communication with plugins, since
+ plugin communication happens over TCP connections on your local host. By
+ default these are 10,000 and 25,000, respectively. Be sure to set a fairly
+ wide range here, since Packer can easily use over 25 ports on a single run.

-- `builders`, `commands`, `post-processors`, and `provisioners` are objects that
- are used to install plugins. The details of how exactly these are set is
- covered in more detail in the [installing plugins documentation
- page](/docs/extend/plugins.html).

+- `builders`, `commands`, `post-processors`, and `provisioners` are objects
+ that are used to install plugins. The details of how exactly these are set
+ is covered in more detail in the [installing plugins documentation
+ page](/docs/extend/plugins.html).

diff --git a/website/source/docs/other/environmental-variables.html.markdown b/website/source/docs/other/environmental-variables.html.markdown
index 7d455c708..8827ea5d9 100644
--- a/website/source/docs/other/environmental-variables.html.markdown
+++ b/website/source/docs/other/environmental-variables.html.markdown
@@ -9,28 +9,28 @@ page_title: Environmental Variables for Packer

Packer uses a variety of environmental variables. A listing and description of
each can be found below:

-- `PACKER_CACHE_DIR` - The location of the packer cache.

+- `PACKER_CACHE_DIR` - The location of the packer cache.

-- `PACKER_CONFIG` - The location of the core configuration file. The format of
- the configuration file is basic JSON. See the [core configuration
- page](/docs/other/core-configuration.html).

+- `PACKER_CONFIG` - The location of the core configuration file. The format of
+ the configuration file is basic JSON. See the [core configuration
+ page](/docs/other/core-configuration.html).

-- `PACKER_LOG` - Setting this to any value will enable the logger. See the
- [debugging page](/docs/other/debugging.html).

+- `PACKER_LOG` - Setting this to any value will enable the logger. See the
+ [debugging page](/docs/other/debugging.html).

-- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be
- set for any logging to occur. See the [debugging
- page](/docs/other/debugging.html).

+- `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be
+ set for any logging to occur. See the [debugging
+ page](/docs/other/debugging.html).

-- `PACKER_NO_COLOR` - Setting this to any value will disable color in
- the terminal.

+- `PACKER_NO_COLOR` - Setting this to any value will disable color in
+ the terminal.
-- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for communication
- with plugins, since plugin communication happens over TCP connections on your
- local host. The default is 25,000. See the [core configuration
- page](/docs/other/core-configuration.html).

+- `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for
+ communication with plugins, since plugin communication happens over TCP
+ connections on your local host. The default is 25,000. See the [core
+ configuration page](/docs/other/core-configuration.html).

-- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for communication
- with plugins, since plugin communication happens over TCP connections on your
- local host. The default is 10,000. See the [core configuration
- page](/docs/other/core-configuration.html).

+- `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for
+ communication with plugins, since plugin communication happens over TCP
+ connections on your local host. The default is 10,000. See the [core
+ configuration page](/docs/other/core-configuration.html).

diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown
index 18211c313..4f2cb3640 100644
--- a/website/source/docs/post-processors/atlas.html.markdown
+++ b/website/source/docs/post-processors/atlas.html.markdown
@@ -25,14 +25,14 @@ location in Atlas.

Here is an example workflow:

-1. Packer builds an AMI with the [Amazon AMI
- builder](/docs/builders/amazon.html)
-2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
- The `atlas` post-processor is configured with the name of the AMI, for example
- `hashicorp/foobar`, to create the artifact in Atlas or update the version if
- the artifact already exists
-3. The new version is ready and available to be used in deployments with a tool
- like [Terraform](https://terraform.io)

+1. Packer builds an AMI with the [Amazon AMI
+ builder](/docs/builders/amazon.html)
+2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
+ The `atlas` post-processor is configured with the name of the AMI, for
+ example `hashicorp/foobar`, to create the artifact in Atlas or update the
+ version if the artifact already exists
+3. The new version is ready and available to be used in deployments with a tool
+ like [Terraform](https://terraform.io)

## Configuration

The configuration allows you to specify and access the artifact in Atlas.

### Required:

-- `token` (string) - Your access token for the Atlas API. This can be generated
- on your [tokens page](https://atlas.hashicorp.com/settings/tokens).
- Alternatively you can export your Atlas token as an environmental variable and
- remove it from the configuration.

+- `token` (string) - Your access token for the Atlas API. This can be
+ generated on your [tokens
+ page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can
+ export your Atlas token as an environmental variable and remove it from
+ the configuration.

-- `artifact` (string) - The shorthand tag for your artifact that maps to Atlas,
- i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. You must
- have access to the organization, hashicorp in this example, in order to add an
- artifact to the organization in Atlas.

+- `artifact` (string) - The shorthand tag for your artifact that maps to
+ Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`.
+ You must have access to the organization, hashicorp in this example, in
+ order to add an artifact to the organization in Atlas.

-- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will
- always be `amazon.ami`. This field must be defined because Atlas can host
- other artifact types, such as Vagrant boxes.

+- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will
+ always be `amazon.ami`. This field must be defined because Atlas can host
+ other artifact types, such as Vagrant boxes.

-> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas
post-processor](/docs/post-processors/atlas.html).

### Optional:

-- `atlas_url` (string) - Override the base URL for Atlas. This is useful if
- you're using Atlas Enterprise in your own network. Defaults to
- `https://atlas.hashicorp.com/api/v1`.

+- `atlas_url` (string) - Override the base URL for Atlas. This is useful if
+ you're using Atlas Enterprise in your own network. Defaults to
+ `https://atlas.hashicorp.com/api/v1`.

-- `metadata` (map) - Send metadata about the artifact. If the artifact type is
- "vagrant.box", you must specify a "provider" metadata about what provider
- to use.

+- `metadata` (map) - Send metadata about the artifact. If the artifact type is
+ "vagrant.box", you must specify a "provider" metadata key indicating which
+ provider to use.

### Example Configuration
diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown
index 716e4e866..ad78a9315 100644
--- a/website/source/docs/post-processors/compress.html.markdown
+++ b/website/source/docs/post-processors/compress.html.markdown
@@ -20,25 +20,25 @@ VMware or VirtualBox) and compresses the artifact into a single archive.

You must specify the output filename. The archive format is derived from the
filename.

-- `output` (string) - The path to save the compressed archive. The archive
- format is inferred from the filename. E.g. `.tar.gz` will be a
- gzipped tarball. `.zip` will be a zip file. If the extension can't be detected
- packer defaults to `.tar.gz` behavior but will not change the filename.

+- `output` (string) - The path to save the compressed archive. The archive
+ format is inferred from the filename. E.g. `.tar.gz` will be a
+ gzipped tarball. `.zip` will be a zip file. If the extension can't be
+ detected, packer defaults to `.tar.gz` behavior but will not change
+ the filename.

- If you are executing multiple builders in parallel you should make sure
- `output` is unique for each one. For example
- `packer_{{.BuildName}}_{{.Provider}}.zip`.

+If you are executing multiple builders in parallel you should make sure `output`
+is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`.

### Optional:

If you want more control over how the archive is created you can specify the
following settings:

-- `compression_level` (integer) - Specify the compression level, for algorithms
- that support it, from 1 through 9 inclusive. Typically higher compression
- levels take longer but produce smaller files. Defaults to `6`

+- `compression_level` (integer) - Specify the compression level, for
+ algorithms that support it, from 1 through 9 inclusive. Typically higher
+ compression levels take longer but produce smaller files.
+ Defaults to `6`.

-- `keep_input_artifact` (boolean) - Keep source files; defaults to `false`

+- `keep_input_artifact` (boolean) - Keep source files; defaults to `false`

### Supported Formats
diff --git a/website/source/docs/post-processors/docker-import.html.markdown b/website/source/docs/post-processors/docker-import.html.markdown
index 0c3855622..968705f4b 100644
--- a/website/source/docs/post-processors/docker-import.html.markdown
+++ b/website/source/docs/post-processors/docker-import.html.markdown
@@ -24,9 +24,9 @@ registry.

The configuration for this post-processor is extremely simple. At least a
repository is required.

-- `repository` (string) - The repository of the imported image.

+- `repository` (string) - The repository of the imported image.

-- `tag` (string) - The tag for the imported image. By default this is not set.

+- `tag` (string) - The tag for the imported image. By default this is not set.

## Example
diff --git a/website/source/docs/post-processors/docker-push.html.markdown b/website/source/docs/post-processors/docker-push.html.markdown
index 72793b735..9657e27b7 100644
--- a/website/source/docs/post-processors/docker-push.html.markdown
+++ b/website/source/docs/post-processors/docker-push.html.markdown
@@ -18,16 +18,16 @@ pushes it to a Docker registry.

This post-processor has only optional configuration:

-- `login` (boolean) - Defaults to false. If true, the post-processor will login
- prior to pushing.

+- `login` (boolean) - Defaults to false. If true, the post-processor will
+ log in prior to pushing.

-- `login_email` (string) - The email to use to authenticate to login.

+- `login_email` (string) - The email to use to authenticate the login.

-- `login_username` (string) - The username to use to authenticate to login.

+- `login_username` (string) - The username to use to authenticate the login.

-- `login_password` (string) - The password to use to authenticate to login.

+- `login_password` (string) - The password to use to authenticate the login.

-- `login_server` (string) - The server address to login to.

+- `login_server` (string) - The server address to log in to.

-> **Note:** If you log in using the credentials above, the post-processor will
automatically log you out afterwards (just the server specified).
diff --git a/website/source/docs/post-processors/docker-save.html.markdown b/website/source/docs/post-processors/docker-save.html.markdown
index 8f758755c..27b9b7533 100644
--- a/website/source/docs/post-processors/docker-save.html.markdown
+++ b/website/source/docs/post-processors/docker-save.html.markdown
@@ -25,7 +25,7 @@ familiar with this and vice versa.

The configuration for this post-processor is extremely simple.

-- `path` (string) - The path to save the image.

+- `path` (string) - The path to save the image.

## Example
diff --git a/website/source/docs/post-processors/docker-tag.html.markdown b/website/source/docs/post-processors/docker-tag.html.markdown
index 42c480676..ea9fccad1 100644
--- a/website/source/docs/post-processors/docker-tag.html.markdown
+++ b/website/source/docs/post-processors/docker-tag.html.markdown
@@ -27,12 +27,12 @@ that this works with committed resources, rather than exported.

The configuration for this post-processor is extremely simple. At least a
repository is required.

-- `repository` (string) - The repository of the image.

+- `repository` (string) - The repository of the image.

-- `tag` (string) - The tag for the image. By default this is not set.
-- `force` (boolean) - If true, this post-processor forcibly tag the image even - if tag name is collided. Default to `false`. +- `force` (boolean) - If true, this post-processor forcibly tag the image even + if tag name is collided. Default to `false`. ## Example diff --git a/website/source/docs/post-processors/vagrant-cloud.html.markdown b/website/source/docs/post-processors/vagrant-cloud.html.markdown index 4891797e8..237684aa1 100644 --- a/website/source/docs/post-processors/vagrant-cloud.html.markdown +++ b/website/source/docs/post-processors/vagrant-cloud.html.markdown @@ -36,16 +36,16 @@ and deliver them to your team in some fashion. Here is an example workflow: -1. You use Packer to build a Vagrant Box for the `virtualbox` provider -2. The `vagrant-cloud` post-processor is configured to point to the box - `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration -3. The post-processor receives the box from the `vagrant` post-processor -4. It then creates the configured version, or verifies the existence of it, on - Vagrant Cloud -5. A provider matching the name of the Vagrant provider is then created -6. The box is uploaded to Vagrant Cloud -7. The upload is verified -8. The version is released and available to users of the box +1. You use Packer to build a Vagrant Box for the `virtualbox` provider +2. The `vagrant-cloud` post-processor is configured to point to the box + `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration +3. The post-processor receives the box from the `vagrant` post-processor +4. It then creates the configured version, or verifies the existence of it, on + Vagrant Cloud +5. A provider matching the name of the Vagrant provider is then created +6. The box is uploaded to Vagrant Cloud +7. The upload is verified +8. The version is released and available to users of the box ## Configuration @@ -54,35 +54,35 @@ on Vagrant Cloud, as well as authentication and version information. ### Required: -- `access_token` (string) - Your access token for the Vagrant Cloud API. This - can be generated on your [tokens - page](https://vagrantcloud.com/account/tokens). +- `access_token` (string) - Your access token for the Vagrant Cloud API. This + can be generated on your [tokens + page](https://vagrantcloud.com/account/tokens). -- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant - Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` +- `box_tag` (string) - The shorthand tag for your box that maps to Vagrant + Cloud, i.e `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64` -- `version` (string) - The version number, typically incrementing a - previous version. The version string is validated based on [Semantic - Versioning](http://semver.org/). The string must match a pattern that could be - semver, and doesn't validate that the version comes after your - previous versions. +- `version` (string) - The version number, typically incrementing a + previous version. The version string is validated based on [Semantic + Versioning](http://semver.org/). The string must match a pattern that could + be semver, and doesn't validate that the version comes after your + previous versions. ### Optional: -- `no_release` (string) - If set to true, does not release the version on - Vagrant Cloud, making it active. You can manually release the version via the - API or Web UI. Defaults to false. +- `no_release` (string) - If set to true, does not release the version on + Vagrant Cloud, making it active. 
+ You can manually release the version via
+ the API or Web UI. Defaults to false.

-- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This
- is useful if you're using Vagrant Private Cloud in your own network. Defaults
- to `https://vagrantcloud.com/api/v1`

+- `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This
+ is useful if you're using Vagrant Private Cloud in your own network.
+ Defaults to `https://vagrantcloud.com/api/v1`

-- `version_description` (string) - Optionally markdown text used as a
- full-length and in-depth description of the version, typically for denoting
- changes introduced

+- `version_description` (string) - Optional markdown text used as a
+ full-length and in-depth description of the version, typically for denoting
+ changes introduced

-- `box_download_url` (string) - Optional URL for a self-hosted box. If this is
- set the box will not be uploaded to the Vagrant Cloud.

+- `box_download_url` (string) - Optional URL for a self-hosted box. If this is
+ set, the box will not be uploaded to the Vagrant Cloud.

## Use with Vagrant Post-Processor
diff --git a/website/source/docs/post-processors/vagrant.html.markdown b/website/source/docs/post-processors/vagrant.html.markdown
index da1b8daa9..3e55e2549 100644
--- a/website/source/docs/post-processors/vagrant.html.markdown
+++ b/website/source/docs/post-processors/vagrant.html.markdown
@@ -29,13 +29,13 @@ certain builders into proper boxes for their respective providers.

Currently, the Vagrant post-processor can create boxes for the following
providers.

-- AWS
-- DigitalOcean
-- Hyper-V
-- Parallels
-- QEMU
-- VirtualBox
-- VMware

+- AWS
+- DigitalOcean
+- Hyper-V
+- Parallels
+- QEMU
+- VirtualBox
+- VMware

-> **Support for additional providers** is planned. If the Vagrant
post-processor doesn't support creating boxes for a provider you care about,

@@ -51,28 +51,28 @@ However, if you want to configure things a bit more, the post-processor does
expose some configuration options. The available options are listed below, with
more details about certain options in following sections.

-- `compression_level` (integer) - An integer representing the compression level
- to use when creating the Vagrant box. Valid values range from 0 to 9, with 0
- being no compression and 9 being the best compression. By default, compression
- is enabled at level 6.

+- `compression_level` (integer) - An integer representing the compression
+ level to use when creating the Vagrant box. Valid values range from 0 to 9,
+ with 0 being no compression and 9 being the best compression. By default,
+ compression is enabled at level 6.

-- `include` (array of strings) - Paths to files to include in the Vagrant box.
- These files will each be copied into the top level directory of the Vagrant
- box (regardless of their paths). They can then be used from the Vagrantfile.

+- `include` (array of strings) - Paths to files to include in the Vagrant box.
+ These files will each be copied into the top level directory of the Vagrant
+ box (regardless of their paths). They can then be used from the Vagrantfile.

-- `keep_input_artifact` (boolean) - If set to true, do not delete the
- `output_directory` on a successful build. Defaults to false.

+- `keep_input_artifact` (boolean) - If set to true, do not delete the
+ `output_directory` on a successful build. Defaults to false.

-- `output` (string) - The full path to the box file that will be created by
- this post-processor.
- This is a [configuration
- template](/docs/templates/configuration-templates.html). The variable
- `Provider` is replaced by the Vagrant provider the box is for. The variable
- `ArtifactId` is replaced by the ID of the input artifact. The variable
- `BuildName` is replaced with the name of the build. By default, the value of
- this config is `packer_{{.BuildName}}_{{.Provider}}.box`.

+- `output` (string) - The full path to the box file that will be created by
+ this post-processor. This is a [configuration
+ template](/docs/templates/configuration-templates.html). The variable
+ `Provider` is replaced by the Vagrant provider the box is for. The variable
+ `ArtifactId` is replaced by the ID of the input artifact. The variable
+ `BuildName` is replaced with the name of the build. By default, the value of
+ this config is `packer_{{.BuildName}}_{{.Provider}}.box`.

-- `vagrantfile_template` (string) - Path to a template to use for the
- Vagrantfile that is packaged with the box.

+- `vagrantfile_template` (string) - Path to a template to use for the
+ Vagrantfile that is packaged with the box.

## Provider-Specific Overrides
diff --git a/website/source/docs/post-processors/vsphere.html.markdown b/website/source/docs/post-processors/vsphere.html.markdown
index f0fd9588e..300155773 100644
--- a/website/source/docs/post-processors/vsphere.html.markdown
+++ b/website/source/docs/post-processors/vsphere.html.markdown
@@ -21,35 +21,36 @@ each category, the available configuration keys are alphabetized.

Required:

-- `cluster` (string) - The cluster to upload the VM to.

+- `cluster` (string) - The cluster to upload the VM to.

-- `datacenter` (string) - The name of the datacenter within vSphere to add the
- VM to.

+- `datacenter` (string) - The name of the datacenter within vSphere to add the
+ VM to.

-- `datastore` (string) - The name of the datastore to store this VM. This is
- *not required* if `resource_pool` is specified.

+- `datastore` (string) - The name of the datastore to store this VM. This is
+ *not required* if `resource_pool` is specified.

-- `host` (string) - The vSphere host that will be contacted to perform the
- VM upload.

+- `host` (string) - The vSphere host that will be contacted to perform the
+ VM upload.

-- `password` (string) - Password to use to authenticate to the vSphere endpoint.

+- `password` (string) - Password to use to authenticate to the
+ vSphere endpoint.

-- `resource_pool` (string) - The resource pool to upload the VM to. This is *not
- required*.

+- `resource_pool` (string) - The resource pool to upload the VM to. This is
+ *not required*.

-- `username` (string) - The username to use to authenticate to the
- vSphere endpoint.

+- `username` (string) - The username to use to authenticate to the
+ vSphere endpoint.

-- `vm_name` (string) - The name of the VM once it is uploaded.

+- `vm_name` (string) - The name of the VM once it is uploaded.

Optional:

-- `disk_mode` (string) - Target disk format. See `ovftool` manual for
- available options. By default, "thick" will be used.

+- `disk_mode` (string) - Target disk format. See `ovftool` manual for
+ available options. By default, "thick" will be used.

-- `insecure` (boolean) - Whether or not the connection to vSphere can be done
- over an insecure connection. By default this is false.

+- `insecure` (boolean) - Whether or not the connection to vSphere can be done
+ over an insecure connection. By default this is false.

-- `vm_folder` (string) - The folder within the datastore to store the VM.
+- `vm_folder` (string) - The folder within the datastore to store the VM.

-- `vm_network` (string) - The name of the VM network this VM will be added to.

+- `vm_network` (string) - The name of the VM network this VM will be added to.

diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown
index 5682043c9..7fd084c0a 100644
--- a/website/source/docs/provisioners/ansible-local.html.markdown
+++ b/website/source/docs/provisioners/ansible-local.html.markdown
@@ -35,83 +35,70 @@ The reference of available configuration options is listed below.

Required:

-- `playbook_file` (string) - The playbook file to be executed by ansible. This
- file must exist on your local system and will be uploaded to the
- remote machine.

+- `playbook_file` (string) - The playbook file to be executed by ansible. This
+ file must exist on your local system and will be uploaded to the
+ remote machine.

Optional:

-- `command` (string) - The command to invoke ansible. Defaults
- to "ansible-playbook".

+- `command` (string) - The command to invoke ansible. Defaults
+ to "ansible-playbook".

-- `extra_arguments` (array of strings) - An array of extra arguments to pass to
- the ansible command. By default, this is empty.

+- `extra_arguments` (array of strings) - An array of extra arguments to pass
+ to the ansible command. By default, this is empty.

-- `inventory_groups` (string) - A comma-separated list of groups to which packer
- will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` will
- generate an Ansible inventory like:

+- `inventory_groups` (string) - A comma-separated list of groups to which
+ packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2`
+ will generate an Ansible inventory like:

-    ``` {.text}
-    [my_group_1]
-    127.0.0.1
-    [my_group_2]
-    127.0.0.1
-    ```

+    ``` {.text}
+    [my_group_1]
+    127.0.0.1
+    [my_group_2]
+    127.0.0.1
+    ```

-- `inventory_file` (string) - The inventory file to be used by ansible. This
- file must exist on your local system and will be uploaded to the
- remote machine.

+- `inventory_file` (string) - The inventory file to be used by ansible. This
+ file must exist on your local system and will be uploaded to the
+ remote machine.

- When using an inventory file, it's also required to `--limit` the hosts to the
- specified host you're buiding. The `--limit` argument can be provided in the
- `extra_arguments` option.

+When using an inventory file, it's also required to `--limit` the hosts to the
+specified host you're building. The `--limit` argument can be provided in the
+`extra_arguments` option.
-    An example inventory file may look like:

+An example inventory file may look like:

-    ``` {.text}
-    [chi-dbservers]
-    db-01 ansible_connection=local
-    db-02 ansible_connection=local

-    [chi-appservers]
-    app-01 ansible_connection=local
-    app-02 ansible_connection=local

-    [chi:children]
-    chi-dbservers
-    chi-appservers

-    [dbservers:children]
-    chi-dbservers

-    [appservers:children]
-    chi-appservers
-    ```

+``` {.text}
+[chi-dbservers]
+db-01 ansible_connection=local
+db-02 ansible_connection=local
+
+[chi-appservers]
+app-01 ansible_connection=local
+app-02 ansible_connection=local
+
+[chi:children]
+chi-dbservers
+chi-appservers
+
+[dbservers:children]
+chi-dbservers
+
+[appservers:children]
+chi-appservers
+```

-- `playbook_dir` (string) - a path to the complete ansible directory structure
- on your local system to be copied to the remote machine as the
- `staging_directory` before all other files and directories.

+- `playbook_dir` (string) - a path to the complete ansible directory structure
+ on your local system to be copied to the remote machine as the
+ `staging_directory` before all other files and directories.

-- `playbook_paths` (array of strings) - An array of paths to playbook files on
- your local system. These will be uploaded to the remote machine under
- `staging_directory`/playbooks. By default, this is empty.

+- `playbook_paths` (array of strings) - An array of paths to playbook files on
+ your local system. These will be uploaded to the remote machine under
+ `staging_directory`/playbooks. By default, this is empty.

-- `group_vars` (string) - a path to the directory containing ansible group
- variables on your local system to be copied to the remote machine. By default,
- this is empty.

+- `group_vars` (string) - a path to the directory containing ansible group
+ variables on your local system to be copied to the remote machine. By
+ default, this is empty.

-- `host_vars` (string) - a path to the directory containing ansible host
- variables on your local system to be copied to the remote machine. By default,
- this is empty.

+- `host_vars` (string) - a path to the directory containing ansible host
+ variables on your local system to be copied to the remote machine. By
+ default, this is empty.

-- `role_paths` (array of strings) - An array of paths to role directories on
- your local system. These will be uploaded to the remote machine under
- `staging_directory`/roles. By default, this is empty.

+- `role_paths` (array of strings) - An array of paths to role directories on
+ your local system. These will be uploaded to the remote machine under
+ `staging_directory`/roles. By default, this is empty.

-- `staging_directory` (string) - The directory where all the configuration of
- Ansible by Packer will be placed. By default this
- is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to
- exist but must have proper permissions so that the SSH user that Packer uses
- is able to create directories and write into this folder. If the permissions
- are not correct, use a shell provisioner prior to this to configure
- it properly.

+- `staging_directory` (string) - The directory where all the configuration of
+ Ansible by Packer will be placed. By default this
+ is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to
+ exist but must have proper permissions so that the SSH user that Packer uses
+ is able to create directories and write into this folder.
If the permissions + are not correct, use a shell provisioner prior to this to configure + it properly. diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 81d097b7e..aca1a2717 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -40,70 +40,71 @@ is running must have knife on the path and configured globally, i.e, The reference of available configuration options is listed below. No configuration is actually required. -- `chef_environment` (string) - The name of the chef\_environment sent to the - Chef server. By default this is empty and will not use an environment. +- `chef_environment` (string) - The name of the chef\_environment sent to the + Chef server. By default this is empty and will not use an environment. -- `config_template` (string) - Path to a template that will be used for the Chef - configuration file. By default Packer only sets configuration it needs to - match the settings set in the provisioner configuration. If you need to set - configurations that the Packer provisioner doesn't support, then you should - use a custom configuration template. See the dedicated "Chef Configuration" - section below for more details. +- `config_template` (string) - Path to a template that will be used for the + Chef configuration file. By default Packer only sets configuration it needs + to match the settings set in the provisioner configuration. If you need to + set configurations that the Packer provisioner doesn't support, then you + should use a custom configuration template. See the dedicated "Chef + Configuration" section below for more details. -- `execute_command` (string) - The command used to execute Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `install_command` (string) - The command used to install Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `json` (object) - An arbitrary mapping of JSON that will be available as node - attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as + node attributes while running Chef. -- `node_name` (string) - The name of the node to register with the Chef Server. - This is optional and by default is packer-{{uuid}}. +- `node_name` (string) - The name of the node to register with the + Chef Server. This is optional and by default is packer-{{uuid}}. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to install and run Chef are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to install and run Chef are executed with `sudo`. If this is true, + then the sudo will be omitted. 
-- `run_list` (array of strings) - The [run - list](http://docs.opscode.com/essentials_node_object_run_lists.html) for Chef. - By default this is empty, and will use the run list sent down by the - Chef Server. +- `run_list` (array of strings) - The [run + list](http://docs.opscode.com/essentials_node_object_run_lists.html) + for Chef. By default this is empty, and will use the run list sent down by + the Chef Server. -- `server_url` (string) - The URL to the Chef server. This is required. +- `server_url` (string) - The URL to the Chef server. This is required. -- `skip_clean_client` (boolean) - If true, Packer won't remove the client from - the Chef server after it is done running. By default, this is false. +- `skip_clean_client` (boolean) - If true, Packer won't remove the client from + the Chef server after it is done running. By default, this is false. -- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the - Chef server after it is done running. By default, this is false. +- `skip_clean_node` (boolean) - If true, Packer won't remove the node from the + Chef server after it is done running. By default, this is false. -- `skip_install` (boolean) - If true, Chef will not automatically be installed - on the machine using the Opscode omnibus installers. +- `skip_install` (boolean) - If true, Chef will not automatically be installed + on the machine using the Opscode omnibus installers. -- `staging_directory` (string) - This is the directory where all the - configuration of Chef by Packer will be placed. By default this - is "/tmp/packer-chef-client". This directory doesn't need to exist but must - have proper permissions so that the SSH user that Packer uses is able to - create directories and write into this folder. If the permissions are not - correct, use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Chef by Packer will be placed. By default this + is "/tmp/packer-chef-client". This directory doesn't need to exist but must + have proper permissions so that the SSH user that Packer uses is able to + create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -- `client_key` (string) - Path to client key. If not set, this defaults to a - file named client.pem in `staging_directory`. +- `client_key` (string) - Path to client key. If not set, this defaults to a + file named client.pem in `staging_directory`. -- `validation_client_name` (string) - Name of the validation client. If not set, - this won't be set in the configuration and the default that Chef uses will - be used. +- `validation_client_name` (string) - Name of the validation client. If not + set, this won't be set in the configuration and the default that Chef uses + will be used. -- `validation_key_path` (string) - Path to the validation key for communicating - with the Chef Server. This will be uploaded to the remote machine. If this is - NOT set, then it is your responsibility via other means (shell - provisioner, etc.) to get a validation key to where Chef expects it. +- `validation_key_path` (string) - Path to the validation key for + communicating with the Chef Server. This will be uploaded to the + remote machine. If this is NOT set, then it is your responsibility via other + means (shell provisioner, etc.) to get a validation key to where Chef + expects it. 
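As a rough sketch of how these options combine, a minimal chef-client
provisioner block might look like the following; the server URL and run list
are placeholder values, not part of this patch:

``` {.javascript}
{
  "type": "chef-client",
  "server_url": "https://chef.example.com/",
  "run_list": ["recipe[base]"],
  "skip_clean_node": true
}
```

Here `skip_clean_node` keeps the registered node on the Chef server after the
build finishes, which is mainly useful while debugging.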
 ## Chef Configuration

@@ -135,9 +136,9 @@ This template is a
 [configuration
 template](/docs/templates/configuration-templates.html) and has a set of
 variables available to use:

-- `NodeName` - The node name set in the configuration.
-- `ServerUrl` - The URL of the Chef Server set in the configuration.
-- `ValidationKeyPath` - Path to the validation key, if it is set.
+- `NodeName` - The node name set in the configuration.
+- `ServerUrl` - The URL of the Chef Server set in the configuration.
+- `ValidationKeyPath` - Path to the validation key, if it is set.

 ## Execute Command

@@ -155,10 +156,10 @@ This command can be customized using the `execute_command` configuration. As you
 can see from the default value above, the value of this configuration can
 contain various template variables, defined below:

-- `ConfigPath` - The path to the Chef configuration file.
-- `JsonPath` - The path to the JSON attributes file for the node.
-- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
-  value of the `prevent_sudo` configuration.
+- `ConfigPath` - The path to the Chef configuration file.
+- `JsonPath` - The path to the JSON attributes file for the node.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.

 ## Install Command

diff --git a/website/source/docs/provisioners/chef-solo.html.markdown b/website/source/docs/provisioners/chef-solo.html.markdown
index 03b55c066..9534c32f1 100644
--- a/website/source/docs/provisioners/chef-solo.html.markdown
+++ b/website/source/docs/provisioners/chef-solo.html.markdown
@@ -36,71 +36,72 @@ directory relative to your working directory.

 The reference of available configuration options is listed below. No
 configuration is actually required, but at least `run_list` is recommended.

-- `chef_environment` (string) - The name of the `chef_environment` sent to the
-  Chef server. By default this is empty and will not use an environment.
+- `chef_environment` (string) - The name of the `chef_environment` sent to the
+  Chef server. By default this is empty and will not use an environment.

-- `config_template` (string) - Path to a template that will be used for the Chef
-  configuration file. By default Packer only sets configuration it needs to
-  match the settings set in the provisioner configuration. If you need to set
-  configurations that the Packer provisioner doesn't support, then you should
-  use a custom configuration template. See the dedicated "Chef Configuration"
-  section below for more details.
+- `config_template` (string) - Path to a template that will be used for the
+  Chef configuration file. By default Packer only sets configuration it needs
+  to match the settings set in the provisioner configuration. If you need to
+  set configurations that the Packer provisioner doesn't support, then you
+  should use a custom configuration template. See the dedicated "Chef
+  Configuration" section below for more details.

-- `cookbook_paths` (array of strings) - This is an array of paths to "cookbooks"
-  directories on your local filesystem. These will be uploaded to the remote
-  machine in the directory specified by the `staging_directory`. By default,
-  this is empty.
+- `cookbook_paths` (array of strings) - This is an array of paths to
+  "cookbooks" directories on your local filesystem. These will be uploaded to
+  the remote machine in the directory specified by the `staging_directory`. By
+  default, this is empty.
-- `data_bags_path` (string) - The path to the "data\_bags" directory on your - local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. +- `data_bags_path` (string) - The path to the "data\_bags" directory on your + local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -- `encrypted_data_bag_secret_path` (string) - The path to the file containing - the secret for encrypted data bags. By default, this is empty, so no secret - will be available. +- `encrypted_data_bag_secret_path` (string) - The path to the file containing + the secret for encrypted data bags. By default, this is empty, so no secret + will be available. -- `environments_path` (string) - The path to the "environments" directory on - your local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. +- `environments_path` (string) - The path to the "environments" directory on + your local filesystem. These will be uploaded to the remote machine in the + directory specified by the `staging_directory`. By default, this is empty. -- `execute_command` (string) - The command used to execute Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `execute_command` (string) - The command used to execute Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `install_command` (string) - The command used to install Chef. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `install_command` (string) - The command used to install Chef. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `json` (object) - An arbitrary mapping of JSON that will be available as node - attributes while running Chef. +- `json` (object) - An arbitrary mapping of JSON that will be available as + node attributes while running Chef. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to install and run Chef are executed with `sudo`. If this is true, - then the sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to install and run Chef are executed with `sudo`. If this is true, + then the sudo will be omitted. -- `remote_cookbook_paths` (array of strings) - A list of paths on the remote - machine where cookbooks will already exist. These may exist from a previous - provisioner or step. If specified, Chef will be configured to look for - cookbooks here. By default, this is empty. +- `remote_cookbook_paths` (array of strings) - A list of paths on the remote + machine where cookbooks will already exist. These may exist from a previous + provisioner or step. If specified, Chef will be configured to look for + cookbooks here. By default, this is empty. -- `roles_path` (string) - The path to the "roles" directory on your - local filesystem. These will be uploaded to the remote machine in the - directory specified by the `staging_directory`. By default, this is empty. 
+- `roles_path` (string) - The path to the "roles" directory on your
+  local filesystem. These will be uploaded to the remote machine in the
+  directory specified by the `staging_directory`. By default, this is empty.

-- `run_list` (array of strings) - The [run
-  list](https://docs.chef.io/run_lists.html) for Chef. By default this is empty.
+- `run_list` (array of strings) - The [run
+  list](https://docs.chef.io/run_lists.html) for Chef. By default this
+  is empty.

-- `skip_install` (boolean) - If true, Chef will not automatically be installed
-  on the machine using the Chef omnibus installers.
+- `skip_install` (boolean) - If true, Chef will not automatically be installed
+  on the machine using the Chef omnibus installers.

-- `staging_directory` (string) - This is the directory where all the
-  configuration of Chef by Packer will be placed. By default this
-  is "/tmp/packer-chef-solo". This directory doesn't need to exist but must have
-  proper permissions so that the SSH user that Packer uses is able to create
-  directories and write into this folder. If the permissions are not correct,
-  use a shell provisioner prior to this to configure it properly.
+- `staging_directory` (string) - This is the directory where all the
+  configuration of Chef by Packer will be placed. By default this
+  is "/tmp/packer-chef-solo". This directory doesn't need to exist but must
+  have proper permissions so that the SSH user that Packer uses is able to
+  create directories and write into this folder. If the permissions are not
+  correct, use a shell provisioner prior to this to configure it properly.

 ## Chef Configuration

@@ -119,14 +120,14 @@ This template is a
 [configuration
 template](/docs/templates/configuration-templates.html) and has a set of
 variables available to use:

-- `ChefEnvironment` - The currently enabled environment. Only non-empty if the
-  environment path is set.
-- `CookbookPaths` is the set of cookbook paths ready to be embedded directly into a
-  Ruby array to configure Chef.
-- `DataBagsPath` is the path to the data bags folder.
-- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret.
-- `EnvironmentsPath` - The path to the environments folder.
-- `RolesPath` - The path to the roles folder.
+- `ChefEnvironment` - The currently enabled environment. Only non-empty if the
+  environment path is set.
+- `CookbookPaths` is the set of cookbook paths ready to be embedded directly
+  into a Ruby array to configure Chef.
+- `DataBagsPath` is the path to the data bags folder.
+- `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret.
+- `EnvironmentsPath` - The path to the environments folder.
+- `RolesPath` - The path to the roles folder.

 ## Execute Command

@@ -144,10 +145,10 @@ This command can be customized using the `execute_command` configuration. As you
 can see from the default value above, the value of this configuration can
 contain various template variables, defined below:

-- `ConfigPath` - The path to the Chef configuration file.
-- `JsonPath` - The path to the JSON attributes file for the node.
-- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
-  value of the `prevent_sudo` configuration.
+- `ConfigPath` - The path to the Chef configuration file.
+- `JsonPath` - The path to the JSON attributes file for the node.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.
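To make these variables concrete, here is a sketch of a chef-solo block that
overrides `execute_command` to raise Chef's log level. It assumes the default
command shape documented earlier on this page; the cookbook path and run list
are placeholders:

``` {.javascript}
{
  "type": "chef-solo",
  "cookbook_paths": ["cookbooks"],
  "run_list": ["recipe[example]"],
  "execute_command": "{{if .Sudo}}sudo {{end}}chef-solo --no-color -l debug -c {{.ConfigPath}} -j {{.JsonPath}}"
}
```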
 ## Install Command

diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown
index 3439b4dd6..7799721a5 100644
--- a/website/source/docs/provisioners/file.html.markdown
+++ b/website/source/docs/provisioners/file.html.markdown
@@ -32,19 +32,19 @@ The file provisioner can upload both single files and complete directories. The
 available configuration options are listed below. All elements are required.

-- `source` (string) - The path to a local file or directory to upload to
-  the machine. The path can be absolute or relative. If it is relative, it is
-  relative to the working directory when Packer is executed. If this is a
-  directory, the existence of a trailing slash is important. Read below on
-  uploading directories.
+- `source` (string) - The path to a local file or directory to upload to
+  the machine. The path can be absolute or relative. If it is relative, it is
+  relative to the working directory when Packer is executed. If this is a
+  directory, the existence of a trailing slash is important. Read below on
+  uploading directories.

-- `destination` (string) - The path where the file will be uploaded to in
-  the machine. This value must be a writable location and any parent directories
-  must already exist.
+- `destination` (string) - The path where the file will be uploaded to in
+  the machine. This value must be a writable location and any parent
+  directories must already exist.

-- `direction` (string) - The direction of the file transfer. This defaults to
-  "upload." If it is set to "download" then the file "source" in the machine will
-  be downloaded locally to "destination".
+- `direction` (string) - The direction of the file transfer. This defaults to
+  "upload." If it is set to "download" then the file "source" in the machine
+  will be downloaded locally to "destination".

 ## Directory Uploads

diff --git a/website/source/docs/provisioners/powershell.html.markdown b/website/source/docs/provisioners/powershell.html.markdown
index ebc56ec4c..4cd862616 100644
--- a/website/source/docs/provisioners/powershell.html.markdown
+++ b/website/source/docs/provisioners/powershell.html.markdown
@@ -32,52 +32,53 @@ required element is either "inline" or "script". Every other option is
 optional.

 Exactly *one* of the following is required:

-- `inline` (array of strings) - This is an array of commands to execute. The
-  commands are concatenated by newlines and turned into a single file, so they
-  are all executed within the same context. This allows you to change
-  directories in one command and use something in the directory in the next and
-  so on. Inline scripts are the easiest way to pull off simple tasks within
-  the machine.
+- `inline` (array of strings) - This is an array of commands to execute. The
+  commands are concatenated by newlines and turned into a single file, so they
+  are all executed within the same context. This allows you to change
+  directories in one command and use something in the directory in the next
+  and so on. Inline scripts are the easiest way to pull off simple tasks
+  within the machine.

-- `script` (string) - The path to a script to upload and execute in the machine.
-  This path can be absolute or relative. If it is relative, it is relative to
-  the working directory when Packer is executed.
+- `script` (string) - The path to a script to upload and execute in
+  the machine. This path can be absolute or relative. If it is relative, it is
+  relative to the working directory when Packer is executed.
-- `scripts` (array of strings) - An array of scripts to execute. The scripts - will be uploaded and executed in the order specified. Each script is executed - in isolation, so state such as variables from one script won't carry on to - the next. +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be uploaded and executed in the order specified. Each script is + executed in isolation, so state such as variables from one script won't + carry on to the next. Optional parameters: -- `binary` (boolean) - If true, specifies that the script(s) are binary files, - and Packer should therefore not convert Windows line endings to Unix line - endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -- `environment_vars` (array of strings) - An array of key/value pairs to inject - prior to the execute\_command. The format should be `key=value`. Packer - injects some environmental variables by default into the environment, as well, - which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -- `execute_command` (string) - The command to use to execute the script. By - default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. - The value of this is treated as [configuration - template](/docs/templates/configuration-templates.html). There are two - available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`. + The value of this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -- `elevated_user` and `elevated_password` (string) - If specified, the - PowerShell script will be run with elevated privileges using the given - Windows user. +- `elevated_user` and `elevated_password` (string) - If specified, the + PowerShell script will be run with elevated privileges using the given + Windows user. -- `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "/tmp/script.sh". This value must be a writable - location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -- `start_retry_timeout` (string) - The amount of time to attempt to *start* the - remote process. By default this is "5m" or 5 minutes. This setting exists in - order to deal with times when SSH may restart, such as a system reboot. Set - this to a higher value if reboots take a longer amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. 
This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. -- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By - default this is just 0. +- `valid_exit_codes` (list of ints) - Valid exit codes for the script. By + default this is just 0. diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index ac5f4f628..7ef13265e 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -45,59 +45,58 @@ The reference of available configuration options is listed below. Required parameters: -- `manifest_file` (string) - This is either a path to a puppet manifest - (`.pp` file) *or* a directory containing multiple manifests that puppet will - apply (the ["main - manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)). - These file(s) must exist on your local system and will be uploaded to the - remote machine. +- `manifest_file` (string) - This is either a path to a puppet manifest + (`.pp` file) *or* a directory containing multiple manifests that puppet will + apply (the ["main + manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)). + These file(s) must exist on your local system and will be uploaded to the + remote machine. Optional parameters: -- `execute_command` (string) - The command used to execute Puppet. This has - various [configuration template - variables](/docs/templates/configuration-templates.html) available. See below - for more information. +- `execute_command` (string) - The command used to execute Puppet. This has + various [configuration template + variables](/docs/templates/configuration-templates.html) available. See + below for more information. -- `facter` (object of key/value strings) - Additional - [facts](http://puppetlabs.com/puppet/related-projects/facter) to make - available when Puppet is running. +- `facter` (object of key/value strings) - Additional + [facts](http://puppetlabs.com/puppet/related-projects/facter) to make + available when Puppet is running. -- `hiera_config_path` (string) - The path to a local file with hiera - configuration to be uploaded to the remote machine. Hiera data directories - must be uploaded using the file provisioner separately. +- `hiera_config_path` (string) - The path to a local file with hiera + configuration to be uploaded to the remote machine. Hiera data directories + must be uploaded using the file provisioner separately. -- `manifest_dir` (string) - The path to a local directory with manifests to be - uploaded to the remote machine. This is useful if your main manifest file - uses imports. This directory doesn't necessarily contain the `manifest_file`. - It is a separate directory that will be set as the "manifestdir" setting - on Puppet. +- `manifest_dir` (string) - The path to a local directory with manifests to be + uploaded to the remote machine. This is useful if your main manifest file + uses imports. This directory doesn't necessarily contain the + `manifest_file`. It is a separate directory that will be set as the + "manifestdir" setting on Puppet. - \~> `manifest_dir` is passed to `puppet apply` as the - `--manifestdir` option. This option was deprecated in puppet 3.6, and removed - in puppet 4.0. 
If you have multiple manifests you should use - `manifest_file` instead. +\~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option. +This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you have +multiple manifests you should use `manifest_file` instead. -- `module_paths` (array of strings) - This is an array of paths to module - directories on your local filesystem. These will be uploaded to the - remote machine. By default, this is empty. +- `module_paths` (array of strings) - This is an array of paths to module + directories on your local filesystem. These will be uploaded to the + remote machine. By default, this is empty. -- `prevent_sudo` (boolean) - By default, the configured commands that are - executed to run Puppet are executed with `sudo`. If this is true, then the - sudo will be omitted. +- `prevent_sudo` (boolean) - By default, the configured commands that are + executed to run Puppet are executed with `sudo`. If this is true, then the + sudo will be omitted. -- `staging_directory` (string) - This is the directory where all the - configuration of Puppet by Packer will be placed. By default this - is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but - must have proper permissions so that the SSH user that Packer uses is able to - create directories and write into this folder. If the permissions are not - correct, use a shell provisioner prior to this to configure it properly. +- `staging_directory` (string) - This is the directory where all the + configuration of Puppet by Packer will be placed. By default this + is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but + must have proper permissions so that the SSH user that Packer uses is able + to create directories and write into this folder. If the permissions are not + correct, use a shell provisioner prior to this to configure it properly. -- `working_directory` (string) - This is the directory from which the puppet - command will be run. When using hiera with a relative path, this option allows - to ensure that the paths are working properly. If not specified, defaults to - the value of specified `staging_directory` (or its default value if not - specified either). +- `working_directory` (string) - This is the directory from which the puppet + command will be run. When using hiera with a relative path, this option + allows to ensure that the paths are working properly. If not specified, + defaults to the value of specified `staging_directory` (or its default value + if not specified either). ## Execute Command @@ -119,15 +118,15 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: -- `WorkingDir` - The path from which Puppet will be executed. -- `FacterVars` - Shell-friendly string of environmental variables used to set - custom facts configured for this provisioner. -- `HieraConfigPath` - The path to a hiera configuration file. -- `ManifestFile` - The path on the remote machine to the manifest file for - Puppet to use. -- `ModulePath` - The paths to the module directories. -- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the - value of the `prevent_sudo` configuration. +- `WorkingDir` - The path from which Puppet will be executed. +- `FacterVars` - Shell-friendly string of environmental variables used to set + custom facts configured for this provisioner. 
+- `HieraConfigPath` - The path to a hiera configuration file.
+- `ManifestFile` - The path on the remote machine to the manifest file for
+  Puppet to use.
+- `ModulePath` - The paths to the module directories.
+- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
+  value of the `prevent_sudo` configuration.

 ## Default Facts

@@ -135,10 +134,10 @@ In addition to being able to specify custom Facter facts using the `facter`
 configuration, the provisioner automatically defines certain commonly useful
 facts:

-- `packer_build_name` is set to the name of the build that Packer is running.
-  This is most useful when Packer is making multiple builds and you want to
-  distinguish them in your Hiera hierarchy.
+- `packer_build_name` is set to the name of the build that Packer is running.
+  This is most useful when Packer is making multiple builds and you want to
+  distinguish them in your Hiera hierarchy.

-- `packer_builder_type` is the type of the builder that was used to create the
-  machine that Puppet is running on. This is useful if you want to run only
-  certain parts of your Puppet code on systems built with certain builders.
+- `packer_builder_type` is the type of the builder that was used to create the
+  machine that Puppet is running on. This is useful if you want to run only
+  certain parts of your Puppet code on systems built with certain builders.

diff --git a/website/source/docs/provisioners/puppet-server.html.markdown b/website/source/docs/provisioners/puppet-server.html.markdown
index 32bcadbe8..bf469956b 100644
--- a/website/source/docs/provisioners/puppet-server.html.markdown
+++ b/website/source/docs/provisioners/puppet-server.html.markdown
@@ -41,36 +41,36 @@ The reference of available configuration options is listed below.

 The provisioner takes various options. None are strictly required. They are
 listed below:

-- `client_cert_path` (string) - Path to the client certificate for the node on
-  your disk. This defaults to nothing, in which case a client cert won't
-  be uploaded.
+- `client_cert_path` (string) - Path to the client certificate for the node on
+  your disk. This defaults to nothing, in which case a client cert won't
+  be uploaded.

-- `client_private_key_path` (string) - Path to the client private key for the
-  node on your disk. This defaults to nothing, in which case a client private
-  key won't be uploaded.
+- `client_private_key_path` (string) - Path to the client private key for the
+  node on your disk. This defaults to nothing, in which case a client private
+  key won't be uploaded.

-- `facter` (object of key/value strings) - Additional Facter facts to make
-  available to the Puppet run.
+- `facter` (object of key/value strings) - Additional Facter facts to make
+  available to the Puppet run.

-- `ignore_exit_codes` (boolean) - If true, Packer will never consider the
-  provisioner a failure.
+- `ignore_exit_codes` (boolean) - If true, Packer will never consider the
+  provisioner a failure.

-- `options` (string) - Additional command line options to pass to `puppet agent`
-  when Puppet is run.
+- `options` (string) - Additional command line options to pass to
+  `puppet agent` when Puppet is run.

-- `prevent_sudo` (boolean) - By default, the configured commands that are
-  executed to run Puppet are executed with `sudo`. If this is true, then the
-  sudo will be omitted.
+- `prevent_sudo` (boolean) - By default, the configured commands that are
+  executed to run Puppet are executed with `sudo`. If this is true, then the
+  sudo will be omitted.
-- `puppet_node` (string) - The name of the node. If this isn't set, the fully
-  qualified domain name will be used.
+- `puppet_node` (string) - The name of the node. If this isn't set, the fully
+  qualified domain name will be used.

-- `puppet_server` (string) - Hostname of the Puppet server. By default "puppet"
-  will be used.
+- `puppet_server` (string) - Hostname of the Puppet server. By default
+  "puppet" will be used.

-- `staging_directory` (string) - This is the directory where all the
-  configuration of Puppet by Packer will be placed. By default this
-  is "/tmp/packer-puppet-server". This directory doesn't need to exist but must
-  have proper permissions so that the SSH user that Packer uses is able to
-  create directories and write into this folder. If the permissions are not
-  correct, use a shell provisioner prior to this to configure it properly.
+- `staging_directory` (string) - This is the directory where all the
+  configuration of Puppet by Packer will be placed. By default this
+  is "/tmp/packer-puppet-server". This directory doesn't need to exist but
+  must have proper permissions so that the SSH user that Packer uses is able
+  to create directories and write into this folder. If the permissions are not
+  correct, use a shell provisioner prior to this to configure it properly.

diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown
index cc1ab1f7b..84171a071 100644
--- a/website/source/docs/provisioners/salt-masterless.html.markdown
+++ b/website/source/docs/provisioners/salt-masterless.html.markdown
@@ -31,28 +31,28 @@ required argument is the path to your local salt state tree.

 Optional:

-- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage
-  is somewhat documented on
-  [GitHub](https://github.com/saltstack/salt-bootstrap), but the [script
-  itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh)
-  has more detailed usage instructions. By default, no arguments are sent to
-  the script.
+- `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage
+  is somewhat documented on
+  [GitHub](https://github.com/saltstack/salt-bootstrap), but the [script
+  itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh)
+  has more detailed usage instructions. By default, no arguments are sent to
+  the script.

-- `local_pillar_roots` (string) - The path to your local [pillar
-  roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration).
-  This will be uploaded to `/srv/pillar` on the remote.
+- `local_pillar_roots` (string) - The path to your local [pillar
+  roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration).
+  This will be uploaded to `/srv/pillar` on the remote.

-- `local_state_tree` (string) - The path to your local [state
-  tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree).
-  This will be uploaded to `/srv/salt` on the remote.
+- `local_state_tree` (string) - The path to your local [state
+  tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree).
+  This will be uploaded to `/srv/salt` on the remote.

-- `minion_config` (string) - The path to your local [minion
-  config](http://docs.saltstack.com/topics/configuration.html). This will be
-  uploaded to `/etc/salt` on the remote.
+- `minion_config` (string) - The path to your local [minion
+  config](http://docs.saltstack.com/topics/configuration.html). This will be
+  uploaded to `/etc/salt` on the remote.

-- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt
-  bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set
-  this to true to skip this step.
+- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt
+  bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set
+  this to true to skip this step.

-- `temp_config_dir` (string) - Where your local state tree will be copied before
-  moving to the `/srv/salt` directory. Default is `/tmp/salt`.
+- `temp_config_dir` (string) - Where your local state tree will be copied
+  before moving to the `/srv/salt` directory. Default is `/tmp/salt`.

diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown
index 97015a847..9cd05ef12 100644
--- a/website/source/docs/provisioners/shell.html.markdown
+++ b/website/source/docs/provisioners/shell.html.markdown
@@ -37,55 +37,56 @@ required element is either "inline" or "script". Every other option is
 optional.

 Exactly *one* of the following is required:

-- `inline` (array of strings) - This is an array of commands to execute. The
-  commands are concatenated by newlines and turned into a single file, so they
-  are all executed within the same context. This allows you to change
-  directories in one command and use something in the directory in the next and
-  so on. Inline scripts are the easiest way to pull off simple tasks within
-  the machine.
+- `inline` (array of strings) - This is an array of commands to execute. The
+  commands are concatenated by newlines and turned into a single file, so they
+  are all executed within the same context. This allows you to change
+  directories in one command and use something in the directory in the next
+  and so on. Inline scripts are the easiest way to pull off simple tasks
+  within the machine.

-- `script` (string) - The path to a script to upload and execute in the machine.
-  This path can be absolute or relative. If it is relative, it is relative to
-  the working directory when Packer is executed.
+- `script` (string) - The path to a script to upload and execute in
+  the machine. This path can be absolute or relative. If it is relative, it is
+  relative to the working directory when Packer is executed.

-- `scripts` (array of strings) - An array of scripts to execute. The scripts
-  will be uploaded and executed in the order specified. Each script is executed
-  in isolation, so state such as variables from one script won't carry on to
-  the next.
+- `scripts` (array of strings) - An array of scripts to execute. The scripts
+  will be uploaded and executed in the order specified. Each script is
+  executed in isolation, so state such as variables from one script won't
+  carry on to the next.

 Optional parameters:

-- `binary` (boolean) - If true, specifies that the script(s) are binary files,
-  and Packer should therefore not convert Windows line endings to Unix line
-  endings (if there are any). By default this is false.
+- `binary` (boolean) - If true, specifies that the script(s) are binary files,
+  and Packer should therefore not convert Windows line endings to Unix line
+  endings (if there are any). By default this is false.

-- `environment_vars` (array of strings) - An array of key/value pairs to inject
-  prior to the execute\_command.
The format should be `key=value`. Packer - injects some environmental variables by default into the environment, as well, - which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -- `execute_command` (string) - The command to use to execute the script. By - default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value of - this is treated as [configuration - template](/docs/templates/configuration-templates.html). There are two - available variables: `Path`, which is the path to the script to run, and - `Vars`, which is the list of `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value + of this is treated as [configuration + template](/docs/templates/configuration-templates.html). There are two + available variables: `Path`, which is the path to the script to run, and + `Vars`, which is the list of `environment_vars`, if configured. -- `inline_shebang` (string) - The - [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when - running commands specified by `inline`. By default, this is `/bin/sh -e`. If - you're not using `inline`, then this configuration has no effect. - **Important:** If you customize this, be sure to include something like the - `-e` flag, otherwise individual steps failing won't fail the provisioner. +- `inline_shebang` (string) - The + [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when + running commands specified by `inline`. By default, this is `/bin/sh -e`. If + you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like the + `-e` flag, otherwise individual steps failing won't fail the provisioner. -- `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "/tmp/script.sh". This value must be a writable - location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -- `start_retry_timeout` (string) - The amount of time to attempt to *start* the - remote process. By default this is "5m" or 5 minutes. This setting exists in - order to deal with times when SSH may restart, such as a system reboot. Set - this to a higher value if reboots take a longer amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. ## Execute Command Example @@ -128,13 +129,13 @@ In addition to being able to specify custom environmental variables using the `environment_vars` configuration, the provisioner automatically defines certain commonly useful environmental variables: -- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. 
-  This is most useful when Packer is making multiple builds and you want to
-  distinguish them slightly from a common provisioning script.
+- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running.
+  This is most useful when Packer is making multiple builds and you want to
+  distinguish them slightly from a common provisioning script.

-- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the
-  machine that the script is running on. This is useful if you want to run only
-  certain parts of the script on systems built with certain builders.
+- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the
+  machine that the script is running on. This is useful if you want to run
+  only certain parts of the script on systems built with certain builders.

 ## Handling Reboots

@@ -181,46 +182,41 @@ provisioner](/docs/provisioners/file.html) (more secure) or using `ssh-keyscan`
 to populate the file (less secure). An example of the latter accessing GitHub
 would be:

-    {
-      "type": "shell",
-      "inline": [
-        "sudo apt-get install -y git",
-        "ssh-keyscan github.com >> ~/.ssh/known_hosts",
-        "git clone git@github.com:exampleorg/myprivaterepo.git"
-      ]
-    }
+    {
+      "type": "shell",
+      "inline": [
+        "sudo apt-get install -y git",
+        "ssh-keyscan github.com >> ~/.ssh/known_hosts",
+        "git clone git@github.com:exampleorg/myprivaterepo.git"
+      ]
+    }

 ## Troubleshooting

 *My shell script doesn't work correctly on Ubuntu*

-- On Ubuntu, the `/bin/sh` shell is
-  [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has
-  [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in
-  it, then put `#!/bin/bash` at the top of your script. Differences between dash
-  and bash can be found on the
-  [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page.
+- On Ubuntu, the `/bin/sh` shell is
+  [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script
+  has [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands
+  in it, then put `#!/bin/bash` at the top of your script. Differences between
+  dash and bash can be found on the
+  [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page.

 *My shell works when I login but fails with the shell provisioner*

-- See the above tip. More than likely, your login shell is using `/bin/bash`
-  while the provisioner is using `/bin/sh`.
+- See the above tip. More than likely, your login shell is using `/bin/bash`
+  while the provisioner is using `/bin/sh`.

 *My installs hang when using `apt-get` or `yum`*

-- Make sure you add a `-y` to the command to prevent it from requiring user
-  input before proceeding.
+- Make sure you add a `-y` to the command to prevent it from requiring user
+  input before proceeding.

 *How do I tell what my shell script is doing?*

-- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`)
-  will echo the script statements as it is executing.
+- Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`)
+  will echo the script statements as it is executing.

 *My builds don't always work the same*

-- Some distributions start the SSH daemon before other core services which can
-  create race conditions. Your first provisioner can tell the machine to wait
-  until it completely boots.
+- Some distributions start the SSH daemon before other core services which can
+  create race conditions. Your first provisioner can tell the machine to wait
+  until it completely boots.
 ``` {.javascript}
 {

diff --git a/website/source/docs/templates/configuration-templates.html.markdown b/website/source/docs/templates/configuration-templates.html.markdown
index 9bc8f835e..c78f13956 100644
--- a/website/source/docs/templates/configuration-templates.html.markdown
+++ b/website/source/docs/templates/configuration-templates.html.markdown
@@ -57,17 +57,17 @@ While some configuration settings have local variables specific to only that
 configuration, a set of functions are available globally for use in *any string*
 in Packer templates. These are listed below for reference.

-- `build_name` - The name of the build being run.
-- `build_type` - The type of the builder being used currently.
-- `isotime [FORMAT]` - UTC time, which can be
-  [formatted](http://golang.org/pkg/time/#example_Time_Format). See more
-  examples below.
-- `lower` - Lowercases the string.
-- `pwd` - The working directory while executing Packer.
-- `template_dir` - The directory to the template for the build.
-- `timestamp` - The current Unix timestamp in UTC.
-- `uuid` - Returns a random UUID.
-- `upper` - Uppercases the string.
+- `build_name` - The name of the build being run.
+- `build_type` - The type of the builder being used currently.
+- `isotime [FORMAT]` - UTC time, which can be
+  [formatted](http://golang.org/pkg/time/#example_Time_Format). See more
+  examples below.
+- `lower` - Lowercases the string.
+- `pwd` - The working directory while executing Packer.
+- `template_dir` - The directory to the template for the build.
+- `timestamp` - The current Unix timestamp in UTC.
+- `uuid` - Returns a random UUID.
+- `upper` - Uppercases the string.

 ### isotime Format

@@ -112,7 +112,8 @@ Timezone
 Numeric
--
+-
+
 01
@@ -147,19 +148,24 @@ Monday (Mon)
 January (Jan)
--
+-
+
--
+-
+
--
+-
+
--
+-
+
--
+-
+
 MST

@@ -205,6 +211,6 @@ Please note that double quote characters need escaping inside of templates:

 Specific to Amazon builders:

-- `clean_ami_name` - AMI names can only contain certain characters. This
-  function will replace illegal characters with a '-' character. Example usage
-  since ":" is not legal in an AMI name is: `{{isotime | clean_ami_name}}`.
+- `clean_ami_name` - AMI names can only contain certain characters. This
+  function will replace illegal characters with a '-' character. Example
+  usage, since ":" is not legal in an AMI name, is:
+  `{{isotime | clean_ami_name}}`.

diff --git a/website/source/docs/templates/introduction.html.markdown b/website/source/docs/templates/introduction.html.markdown
index 1d67ea196..c48dc6c73 100644
--- a/website/source/docs/templates/introduction.html.markdown
+++ b/website/source/docs/templates/introduction.html.markdown
@@ -27,40 +27,41 @@ A template is a JSON object that has a set of keys configuring various
 components of Packer. The available keys within a template are listed below.
 Along with each key, it is noted whether it is required or not.

-- `builders` (*required*) is an array of one or more objects that defines the
-  builders that will be used to create machine images for this template, and
-  configures each of those builders. For more information on how to define and
-  configure a builder, read the sub-section on [configuring builders in
-  templates](/docs/templates/builders.html).
+- `builders` (*required*) is an array of one or more objects that defines the
+  builders that will be used to create machine images for this template, and
+  configures each of those builders.
For more information on how to define and + configure a builder, read the sub-section on [configuring builders in + templates](/docs/templates/builders.html). -- `description` (optional) is a string providing a description of what the - template does. This output is used only in the [inspect - command](/docs/command-line/inspect.html). +- `description` (optional) is a string providing a description of what the + template does. This output is used only in the [inspect + command](/docs/command-line/inspect.html). -- `min_packer_version` (optional) is a string that has a minimum Packer version - that is required to parse the template. This can be used to ensure that proper - versions of Packer are used with the template. A max version can't be - specified because Packer retains backwards compatibility with `packer fix`. +- `min_packer_version` (optional) is a string that has a minimum Packer + version that is required to parse the template. This can be used to ensure + that proper versions of Packer are used with the template. A max version + can't be specified because Packer retains backwards compatibility with + `packer fix`. -- `post-processors` (optional) is an array of one or more objects that defines - the various post-processing steps to take with the built images. If not - specified, then no post-processing will be done. For more information on what - post-processors do and how they're defined, read the sub-section on - [configuring post-processors in - templates](/docs/templates/post-processors.html). +- `post-processors` (optional) is an array of one or more objects that defines + the various post-processing steps to take with the built images. If not + specified, then no post-processing will be done. For more information on + what post-processors do and how they're defined, read the sub-section on + [configuring post-processors in + templates](/docs/templates/post-processors.html). -- `provisioners` (optional) is an array of one or more objects that defines the - provisioners that will be used to install and configure software for the - machines created by each of the builders. If it is not specified, then no - provisioners will be run. For more information on how to define and configure - a provisioner, read the sub-section on [configuring provisioners in - templates](/docs/templates/provisioners.html). +- `provisioners` (optional) is an array of one or more objects that defines + the provisioners that will be used to install and configure software for the + machines created by each of the builders. If it is not specified, then no + provisioners will be run. For more information on how to define and + configure a provisioner, read the sub-section on [configuring provisioners + in templates](/docs/templates/provisioners.html). -- `variables` (optional) is an array of one or more key/value strings that - defines user variables contained in the template. If it is not specified, then - no variables are defined. For more information on how to define and use user - variables, read the sub-section on [user variables in - templates](/docs/templates/user-variables.html). +- `variables` (optional) is an array of one or more key/value strings that + defines user variables contained in the template. If it is not specified, + then no variables are defined. For more information on how to define and use + user variables, read the sub-section on [user variables in + templates](/docs/templates/user-variables.html). 
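To show how these keys fit together, here is a small illustrative skeleton.
The builder shown and its settings are placeholder choices, not part of this
patch, and most builders require more options than this:

``` {.javascript}
{
  "description": "An illustrative template skeleton",
  "min_packer_version": "0.7.0",
  "variables": {
    "name": "example"
  },
  "builders": [
    {
      "type": "docker",
      "image": "ubuntu:14.04",
      "commit": true
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "inline": ["echo building {{user `name`}}"]
    }
  ]
}
```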
## Comments diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index 3ca2c2de2..b46bef3e8 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -37,31 +37,31 @@ each category, the available configuration keys are alphabetized. ### Required -- `name` (string) - Name of the build configuration in the build service. If - this doesn't exist, it will be created (by default). +- `name` (string) - Name of the build configuration in the build service. If + this doesn't exist, it will be created (by default). ### Optional -- `address` (string) - The address of the build service to use. By default this - is `https://atlas.hashicorp.com`. +- `address` (string) - The address of the build service to use. By default + this is `https://atlas.hashicorp.com`. -- `base_dir` (string) - The base directory of the files to upload. This will be - the current working directory when the build service executes your template. - This path is relative to the template. +- `base_dir` (string) - The base directory of the files to upload. This will + be the current working directory when the build service executes + your template. This path is relative to the template. -- `include` (array of strings) - Glob patterns to include relative to the - `base_dir`. If this is specified, only files that match the include pattern - are included. +- `include` (array of strings) - Glob patterns to include relative to the + `base_dir`. If this is specified, only files that match the include pattern + are included. -- `exclude` (array of strings) - Glob patterns to exclude relative to the - `base_dir`. +- `exclude` (array of strings) - Glob patterns to exclude relative to the + `base_dir`. -- `token` (string) - An access token to use to authenticate to the - build service. +- `token` (string) - An access token to use to authenticate to the + build service. -- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and - only upload the files that are tracked by the VCS. This is useful for - automatically excluding ignored files. This defaults to false. +- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and + only upload the files that are tracked by the VCS. This is useful for + automatically excluding ignored files. This defaults to false. ## Examples diff --git a/website/source/intro/platforms.html.markdown b/website/source/intro/platforms.html.markdown index 586c0c4ec..86d71545e 100644 --- a/website/source/intro/platforms.html.markdown +++ b/website/source/intro/platforms.html.markdown @@ -33,40 +33,42 @@ is noted. They are listed in alphabetical order. For more detailed information on supported configuration parameters and usage, please see the appropriate [documentation page within the documentation section](/docs). -- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within - [EC2](http://aws.amazon.com/ec2/), optionally distributed to multiple regions. +- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within + [EC2](http://aws.amazon.com/ec2/), optionally distributed to + multiple regions. -- ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) - that can be used to start a pre-configured DigitalOcean instance of any size. +- ***DigitalOcean***. Snapshots for + [DigitalOcean](http://www.digitalocean.com/) that can be used to start a + pre-configured DigitalOcean instance of any size. -- ***Docker***. 
Snapshots for [Docker](http://www.docker.io/) that can be used - to start a pre-configured Docker instance. +- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used + to start a pre-configured Docker instance. -- ***Google Compute Engine***. Snapshots for [Google Compute - Engine](https://cloud.google.com/products/compute-engine) that can be used to - start a pre-configured Google Compute Engine instance. +- ***Google Compute Engine***. Snapshots for [Google Compute + Engine](https://cloud.google.com/products/compute-engine) that can be used + to start a pre-configured Google Compute Engine instance. -- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can be - used to start pre-configured OpenStack servers. +- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can + be used to start pre-configured OpenStack servers. -- ***Parallels (PVM)***. Exported virtual machines for - [Parallels](http://www.parallels.com/downloads/desktop/), including virtual - machine metadata such as RAM, CPUs, etc. These virtual machines are portable - and can be started on any platform Parallels runs on. +- ***Parallels (PVM)***. Exported virtual machines for + [Parallels](http://www.parallels.com/downloads/desktop/), including virtual + machine metadata such as RAM, CPUs, etc. These virtual machines are portable + and can be started on any platform Parallels runs on. -- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or - [Xen](http://www.xenproject.org/) that can be used to start pre-configured KVM - or Xen instances. +- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or + [Xen](http://www.xenproject.org/) that can be used to start pre-configured + KVM or Xen instances. -- ***VirtualBox (OVF)***. Exported virtual machines for - [VirtualBox](https://www.virtualbox.org/), including virtual machine metadata - such as RAM, CPUs, etc. These virtual machines are portable and can be started - on any platform VirtualBox runs on. +- ***VirtualBox (OVF)***. Exported virtual machines for + [VirtualBox](https://www.virtualbox.org/), including virtual machine + metadata such as RAM, CPUs, etc. These virtual machines are portable and can + be started on any platform VirtualBox runs on. -- ***VMware (VMX)***. Exported virtual machines for - [VMware](http://www.vmware.com/) that can be run within any desktop products - such as Fusion, Player, or Workstation, as well as server products such - as vSphere. +- ***VMware (VMX)***. Exported virtual machines for + [VMware](http://www.vmware.com/) that can be run within any desktop products + such as Fusion, Player, or Workstation, as well as server products such + as vSphere. As previously mentioned, these are just the target image types that Packer ships with out of the box. 
You can always [extend Packer through From 1e9459a0675ddf635b04860a4e6afee62e1d8be0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 23 Jul 2015 00:02:18 -0700 Subject: [PATCH 677/956] Changed push docs to more clearly explain how they work with Atlas --- .../docs/command-line/push.html.markdown | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 764333967..96e5b3e20 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -1,28 +1,21 @@ --- description: | - The `packer push` Packer command takes a template and pushes it to a build - service that will automatically build this Packer template. + The `packer push` command uploads a template and other required files to the Atlas build service, which will run your packer build for you. layout: docs page_title: 'Push - Command-Line' ... # Command-Line: Push -The `packer push` Packer command takes a template and pushes it to a Packer -build service such as [HashiCorp's Atlas](https://atlas.hashicorp.com). The -build service will automatically build your Packer template and expose the -artifacts. +The `packer push` command uploads a template and other required files to the Atlas service, which will run your packer build for you. [Learn more about Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) -External build services such as HashiCorp's Atlas make it easy to iterate on -Packer templates, especially when the builder you are running may not be easily -accessable (such as developing `qemu` builders on Mac or Windows). +Running builds remotely makes it easier to iterate on packer builds that are not supported on your operating system, for example, building docker or QEMU while developing on Mac or Windows. Also, the hard work of building VMs is offloaded to dedicated servers with more CPU, memory, and network resources. -!> The Packer build service will receive the raw copy of your Packer template -when you push. **If you have sensitive data in your Packer template, you should -move that data into Packer variables or environment variables!** +When you use push to run a build in Atlas, you may also want to store your build artifacts in Atlas. In order to do that you will also need to configure the [Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and both the post-processor and push commands can be used independently. -For the `push` command to work, the [push -configuration](/docs/templates/push.html) must be completed within the template. +!> The push command uploads your template and other files, like provisioning scripts, to Atlas. Take care not to upload files that you don't intend to, like secrets or large binaries. **If you have secrets in your Packer template, you should [move them into environment variables](https://packer.io/docs/templates/user-variables.html).** + +Most push behavior is [configured in your packer template](/docs/templates/push.html). You can override or supplement your configuration using the options below. ## Options @@ -30,12 +23,16 @@ configuration](/docs/templates/push.html) must be completed within the template. template much like a VCS commit message. This message will be passed to the Packer build service. This option is also available as a short option `-m`. 
-- `-token` - An access token for authenticating the push to the Packer build
-  service such as Atlas. This can also be specified within the push
-  configuration in the template.
+- `-token` - Your access token for the Atlas API.
+
+-> Log in to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `-token` on the command line.

 - `-name` - The name of the build in the service. This typically looks like
-  `hashicorp/precise64`.
+  `hashicorp/precise64`, which follows the form `<username>/<name>`. This must be specified here or in your template.
+
+- `-var` - Set a variable in your packer template. This option can be used multiple times. This is useful for setting version numbers for your build.
+
+- `-var-file` - Set template variables from a file.

 ## Examples

From a77ee557ac8dff754a025ffcc827cd91436fa443 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 24 Jul 2015 14:49:22 -0700
Subject: [PATCH 678/956] Starting rework of atlas post-processor page

---
 .../docs/post-processors/atlas.html.markdown  | 22 +++++++-----------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown
index 4f2cb3640..8839830d2 100644
--- a/website/source/docs/post-processors/atlas.html.markdown
+++ b/website/source/docs/post-processors/atlas.html.markdown
@@ -11,9 +11,9 @@ page_title: 'Atlas Post-Processor'

 Type: `atlas`

-The Atlas post-processor for Packer receives an artifact from a Packer build and
-uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves
-artifacts, allowing you to version and distribute them in a simple way.
+The Atlas post-processor uploads artifacts from your packer builds to Atlas for hosting. Artifacts hosted in Atlas are automatically made available for use with Vagrant and Terraform, and Atlas provides additional features for managing versions and releases. [Learn more about packer in Atlas.](https://atlas.hashicorp.com/help/packer/features)
+
+You can also use the push command to [run packer builds in Atlas](/docs/command-line/push.html). The push command and Atlas post-processor can be used together or independently.

 ## Workflow

@@ -25,8 +25,7 @@ location in Atlas.

 Here is an example workflow:

-1. Packer builds an AMI with the [Amazon AMI
-   builder](/docs/builders/amazon.html)
+1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html)
 2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
    The `atlas` post-processor is configured with the name of the AMI, for
    example `hashicorp/foobar`, to create the artifact in Atlas or update the
@@ -40,24 +39,19 @@ The configuration allows you to specify and access the artifact in Atlas.

 ### Required:

-- `token` (string) - Your access token for the Atlas API. This can be
-  generated on your [tokens
-  page](https://atlas.hashicorp.com/settings/tokens). Alternatively you can
-  export your Atlas token as an environmental variable and remove it from
-  the configuration.
+- `token` (string) - Your access token for the Atlas API.
+
+-> Log in to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use the `token` configuration option.
- `artifact` (string) - The shorthand tag for your artifact that maps to Atlas, i.e `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`. - You must have access to the organization, hashicorp in this example, in + You must have access to the organization—hashicorp in this example—in order to add an artifact to the organization in Atlas. - `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will always be `amazon.ami`. This field must be defined because Atlas can host other artifact types, such as Vagrant boxes. --> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas -post-processor](/docs/post-processors/atlas.html). - ### Optional: - `atlas_url` (string) - Override the base URL for Atlas. This is useful if From 7a6eb966c0d85a115990fa76e13704b376464b1a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 14:49:44 -0700 Subject: [PATCH 679/956] We actually use PACKER_ACC not TF_ACC --- helper/builder/testing/testing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/builder/testing/testing.go b/helper/builder/testing/testing.go index 522d7a265..0bfb136ae 100644 --- a/helper/builder/testing/testing.go +++ b/helper/builder/testing/testing.go @@ -64,7 +64,7 @@ type TestT interface { // Test performs an acceptance test on a backend with the given test case. // -// Tests are not run unless an environmental variable "TF_ACC" is +// Tests are not run unless an environmental variable "PACKER_ACC" is // set to some non-empty value. This is to avoid test cases surprising // a user by creating real resources. // From 30850b851d6dcad5979abd6d9787918c9e2a3c0f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 14:54:46 -0700 Subject: [PATCH 680/956] Reformat --- .../docs/command-line/push.html.markdown | 38 ++++++++++++++----- .../docs/post-processors/atlas.html.markdown | 22 ++++++++--- 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 96e5b3e20..06e5a3c98 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -1,21 +1,36 @@ --- description: | - The `packer push` command uploads a template and other required files to the Atlas build service, which will run your packer build for you. + The `packer push` command uploads a template and other required files to the + Atlas build service, which will run your packer build for you. layout: docs page_title: 'Push - Command-Line' ... # Command-Line: Push -The `packer push` command uploads a template and other required files to the Atlas service, which will run your packer build for you. [Learn more about Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) +The `packer push` command uploads a template and other required files to the +Atlas service, which will run your packer build for you. [Learn more about +Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features) -Running builds remotely makes it easier to iterate on packer builds that are not supported on your operating system, for example, building docker or QEMU while developing on Mac or Windows. Also, the hard work of building VMs is offloaded to dedicated servers with more CPU, memory, and network resources. 
+Running builds remotely makes it easier to iterate on packer builds that are not +supported on your operating system, for example, building docker or QEMU while +developing on Mac or Windows. Also, the hard work of building VMs is offloaded +to dedicated servers with more CPU, memory, and network resources. -When you use push to run a build in Atlas, you may also want to store your build artifacts in Atlas. In order to do that you will also need to configure the [Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and both the post-processor and push commands can be used independently. +When you use push to run a build in Atlas, you may also want to store your build +artifacts in Atlas. In order to do that you will also need to configure the +[Atlas post-processor](/docs/post-processors/atlas.html). This is optional, and +both the post-processor and push commands can be used independently. -!> The push command uploads your template and other files, like provisioning scripts, to Atlas. Take care not to upload files that you don't intend to, like secrets or large binaries. **If you have secrets in your Packer template, you should [move them into environment variables](https://packer.io/docs/templates/user-variables.html).** +!> The push command uploads your template and other files, like provisioning +scripts, to Atlas. Take care not to upload files that you don't intend to, like +secrets or large binaries. **If you have secrets in your Packer template, you +should [move them into environment +variables](https://packer.io/docs/templates/user-variables.html).** -Most push behavior is [configured in your packer template](/docs/templates/push.html). You can override or supplement your configuration using the options below. +Most push behavior is [configured in your packer +template](/docs/templates/push.html). You can override or supplement your +configuration using the options below. ## Options @@ -25,12 +40,17 @@ Most push behavior is [configured in your packer template](/docs/templates/push. - `-token` - Your access token for the Atlas API. --> Login to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use `-token` on the command line. +-> Login to Atlas to [generate an Atlas +Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to +configure your token is to set it to the `ATLAS_TOKEN` environment variable, but +you can also use `-token` on the command line. - `-name` - The name of the build in the service. This typically looks like - `hashicorp/precise64`, which follows the form `/`. This must be specified here or in your template. + `hashicorp/precise64`, which follows the form `/`. This + must be specified here or in your template. -- `-var` - Set a variable in your packer template. This option can be used multiple times. This is useful for setting version numbers for your build. +- `-var` - Set a variable in your packer template. This option can be used + multiple times. This is useful for setting version numbers for your build. - `-var-file` - Set template variables from a file. 
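As an aside, the reworked flags compose on a single command line. A hypothetical invocation, with a placeholder token, build name, and file names, might look like this:

```text
$ export ATLAS_TOKEN=xxxxxxxxxxxx    # placeholder token, not a real credential
$ packer push -name hashicorp/precise64 -var "version=1.0.2" -var-file common_vars.json template.json
```

Here `common_vars.json` and `template.json` are stand-in file names, and the `-name` value follows the `<username>/<name>` form described above.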
diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown
index 8839830d2..435bec7c4 100644
--- a/website/source/docs/post-processors/atlas.html.markdown
+++ b/website/source/docs/post-processors/atlas.html.markdown
@@ -11,9 +11,15 @@ page_title: 'Atlas Post-Processor'

 Type: `atlas`

-The Atlas post-processor uploads artifacts from your packer builds to Atlas for hosting. Artifacts hosted in Atlas are automatically made available for use with Vagrant and Terraform, and Atlas provides additional features for managing versions and releases. [Learn more about packer in Atlas.](https://atlas.hashicorp.com/help/packer/features)
+The Atlas post-processor uploads artifacts from your packer builds to Atlas for
+hosting. Artifacts hosted in Atlas are automatically made available for use
+with Vagrant and Terraform, and Atlas provides additional features for managing
+versions and releases. [Learn more about packer in
+Atlas.](https://atlas.hashicorp.com/help/packer/features)

-You can also use the push command to [run packer builds in Atlas](/docs/command-line/push.html). The push command and Atlas post-processor can be used together or independently.
+You can also use the push command to [run packer builds in
+Atlas](/docs/command-line/push.html). The push command and Atlas post-processor
+can be used together or independently.

 ## Workflow

@@ -25,7 +31,8 @@ location in Atlas.

 Here is an example workflow:

-1. Packer builds an AMI with the [Amazon AMI builder](/docs/builders/amazon.html)
+1. Packer builds an AMI with the [Amazon AMI
+   builder](/docs/builders/amazon.html)
 2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
    The `atlas` post-processor is configured with the name of the AMI, for
    example `hashicorp/foobar`, to create the artifact in Atlas or update the
@@ -41,12 +48,15 @@ The configuration allows you to specify and access the artifact in Atlas.

 - `token` (string) - Your access token for the Atlas API.

--> Log in to Atlas to [generate an Atlas Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to configure your token is to set it to the `ATLAS_TOKEN` environment variable, but you can also use the `token` configuration option.
+-> Log in to Atlas to [generate an Atlas
+Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to
+configure your token is to set it to the `ATLAS_TOKEN` environment variable, but
+you can also use the `token` configuration option.

 - `artifact` (string) - The shorthand tag for your artifact that maps to
   Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`.
-  You must have access to the organization—hashicorp in this example—in
-  order to add an artifact to the organization in Atlas.
+  You must have access to the organization—hashicorp in this example—in order
+  to add an artifact to the organization in Atlas.

 - `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will
   always be `amazon.ami`.
This field must be defined because Atlas can host From 64604ee955bcef6b05f0366cb79ecea296e72ec1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 18:41:42 -0700 Subject: [PATCH 681/956] More succinct phrasing for cracklib conflict --- .../intro/getting-started/setup.html.markdown | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index 5e4734e08..ba7d95cf0 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -58,34 +58,6 @@ If you get an error that `packer` could not be found, then your PATH environment variable was not setup properly. Please go back and ensure that your PATH variable contains the directory which has Packer installed. -The `packer` binary may conflict with the cracklib-supplied packer binary -on RPM-based systems like Fedora, RHEL or CentOS. If this happens, running -`packer` will result in no output or something like this: - -```text -$ packer -/usr/share/cracklib/pw_dict.pwd: Permission denied -/usr/share/cracklib/pw_dict: Permission denied -``` - -In this case you may wish to symlink the `packer` binary to `packer.io` -and use that instead. e.g. - -```text -ln -s /usr/local/bin/packer /usr/local/bin/packer.io -``` - -Then replace `packer` with `packer.io` when following the rest of the -documentation. - -Alternatively you could change your `$PATH` so that the right packer -binary is selected first, however this may cause issues when attempting -to change passwords in the future. - -```text -export PATH="/path/to/packer/directory:$PATH" -``` - Otherwise, Packer is installed and you're ready to go! ## Alternative Installation Methods @@ -97,6 +69,14 @@ are alternatives available. If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: -```text -$ brew install packer -``` + $ brew install packer + +## Troubleshooting + +On some RedHat-based Linux distributions there is another tool named `packer` installed by default. You can check for this using `which -a packer`. If you get an error like this it indicates there is a name conflict. + + $ packer + /usr/share/cracklib/pw_dict.pwd: Permission denied + /usr/share/cracklib/pw_dict: Permission denied + +To fix this, you can create a symlink to packer that uses a different name like `packer.io`, or invoke the `packer` binary you want using its absolute path, e.g. `/usr/local/packer`. From b533a4b833d87359d9c436796f7b34850465ca79 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 18:55:36 -0700 Subject: [PATCH 682/956] Added a note on permissions required for IAM roles. Thanks @bmatsuo --- .../source/docs/builders/amazon.html.markdown | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index ad336ad1c..8d6c07543 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -70,3 +70,24 @@ The following policy document provides the minimal set permissions necessary for }] } ``` + +## Troubleshooting + +### Attaching IAM Policies to Roles + +IAM policies can be associated with user or roles. 
If you use packer with IAM roles, you may encounter an error like this one: + + ==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation. + +You can read more about why this happens on the [Amazon Security Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). The example policy below may help packer work with IAM roles. Note that this example provides more than the minimal set of permissions needed for packer to work, but specifics will depend on your use-case. + +```json +{ + "Sid": "PackerIAMPassRole", + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": [ + "*" + ] +} +``` From c3e39c2f0d5f0a8c3486c68456c6ad0ce062c7f6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:38:14 -0700 Subject: [PATCH 683/956] Updated docs on how AWS credentials are resolved --- .../docs/builders/amazon-chroot.html.markdown | 7 ++--- .../docs/builders/amazon-ebs.html.markdown | 8 ++--- .../builders/amazon-instance.html.markdown | 9 ++---- .../source/docs/builders/amazon.html.markdown | 29 +++++++++++++++++++ 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index b3d1644dd..7e1a23ccb 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -57,10 +57,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. - Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -68,7 +65,7 @@ can be configured for this builder. [configuration templates](/docs/templates/configuration-templates.html) for more info) * `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`. + [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index fc78901a6..6413899c8 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -37,10 +37,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. 
- If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. - Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `ami_name` (string) - The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. @@ -53,8 +50,7 @@ can be configured for this builder. * `region` (string) - The name of the region, such as "us-east-1", in which to launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` +* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 81e425c9a..565d77594 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -42,10 +42,7 @@ can be configured for this builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. - If not specified, Packer will search the standard [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file using environment variable `AWS_PROFILE` as the profile name, will use the `[default]` entry, - or will fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`. - Finally, if Packer is running on an EC2 instance it will check the instance metadata for IAM role keys. +* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `account_id` (string) - Your AWS account ID. This is required for bundling the AMI. This is _not the same_ as the access key. You can find your @@ -65,9 +62,7 @@ can be configured for this builder. * `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. - Lookup behavior is as above for `access_key` except the variables are `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` - +* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) * `source_ami` (string) - The initial AMI used as a base for the newly created machine. diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 783018d95..736f61068 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -31,6 +31,35 @@ AMI. Packer supports the following builders at the moment: [amazon-ebs builder](/docs/builders/amazon-ebs.html). 
It is much easier to use and Amazon generally recommends EBS-backed images nowadays. +
+## Specifying Amazon Credentials
    + +When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: + + access key id: AKIAIOSFODNN7EXAMPLE + secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + +If you use other AWS tools you may already have these configured. If so, packer will try to use them, *unless* they are specified in your packer template. Credentials are resolved in the following order: + +1. Values hard-coded in the packer template are always authoritative. +2. *Variables* in the packer template may be resolved from command-line flags or from environment variables. Please read about [User Variables](https://packer.io/docs/templates/user-variables.html) for details. +3. If no credentials are found, packer falls back to automatic lookup. + +### Automatic Lookup + +If no AWS credentials are found in a packer template, we proceed on to the following steps: + +1. Lookup via environment variables. + - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` + - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` +2. Look for [local AWS configuration files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + - First `~/.aws/credentials` + - Next based on `AWS_PROFILE` +3. Lookup an IAM role for the current EC2 instance (if you're running in EC2) + +~> **Subtle details of automatic lookup may change over time.** The most reliable way to specify your configuration is by setting them in template variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. + +Environment variables provide the best portability, allowing you to run your packer build on your workstation, in Atlas, or on another build server. + ## Using an IAM Instance Profile If AWS keys are not specified in the template, Packer will consult the [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, try the standard AWS environment variables, and then From 3fe2d2f5bc49406bb5d8a68134a2a05208f1902a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:55:08 -0700 Subject: [PATCH 684/956] Reformat --- .../docs/builders/amazon-chroot.html.markdown | 21 ++-- .../docs/builders/amazon-ebs.html.markdown | 30 +++--- .../builders/amazon-instance.html.markdown | 53 +++++----- .../source/docs/builders/amazon.html.markdown | 97 ++++++++++++------- .../intro/getting-started/setup.html.markdown | 8 +- 5 files changed, 121 insertions(+), 88 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.markdown b/website/source/docs/builders/amazon-chroot.html.markdown index fb824d488..8cc633caa 100644 --- a/website/source/docs/builders/amazon-chroot.html.markdown +++ b/website/source/docs/builders/amazon-chroot.html.markdown @@ -60,19 +60,20 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. 
- To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `secret_key` (string) - The secret key used to communicate with AWS. - [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The source AMI whose root volume will be copied - and provisioned on the currently running instance. This must be an - EBS-backed AMI with a root volume snapshot that you have access to. +- `source_ami` (string) - The source AMI whose root volume will be copied and + provisioned on the currently running instance. This must be an EBS-backed + AMI with a root volume snapshot that you have access to. ### Optional: diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index e89f525e6..f97404d19 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -40,26 +40,28 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. 
+- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. ### Optional: diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index cec98a5c0..13ab1f293 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -45,41 +45,44 @@ builder. ### Required: -* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `account_id` (string) - Your AWS account ID. This is required for bundling - the AMI. This is _not the same_ as the access key. You can find your - account ID in the security credentials page of your AWS account. +- `account_id` (string) - Your AWS account ID. This is required for bundling + the AMI. This is *not the same* as the access key. You can find your account + ID in the security credentials page of your AWS account. -* `ami_name` (string) - The name of the resulting AMI that will appear - when managing AMIs in the AWS console or via APIs. This must be unique. - To help make this unique, use a function like `timestamp` (see - [configuration templates](/docs/templates/configuration-templates.html) for more info) +- `ami_name` (string) - The name of the resulting AMI that will appear when + managing AMIs in the AWS console or via APIs. This must be unique. To help + make this unique, use a function like `timestamp` (see [configuration + templates](/docs/templates/configuration-templates.html) for more info) -* `instance_type` (string) - The EC2 instance type to use while building - the AMI, such as "m1.small". +- `instance_type` (string) - The EC2 instance type to use while building the + AMI, such as "m1.small". -* `region` (string) - The name of the region, such as "us-east-1", in which - to launch the EC2 instance to create the AMI. +- `region` (string) - The name of the region, such as "us-east-1", in which to + launch the EC2 instance to create the AMI. -* `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. - This bucket will be created if it doesn't exist. +- `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This + bucket will be created if it doesn't exist. -* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -* `source_ami` (string) - The initial AMI used as a base for the newly - created machine. +- `source_ami` (string) - The initial AMI used as a base for the newly + created machine. -* `ssh_username` (string) - The username to use in order to communicate - over SSH to the running machine. +- `ssh_username` (string) - The username to use in order to communicate over + SSH to the running machine. -* `x509_cert_path` (string) - The local path to a valid X509 certificate for - your AWS account. This is used for bundling the AMI. This X509 certificate - must be registered with your account from the security credentials page - in the AWS console. 
+- `x509_cert_path` (string) - The local path to a valid X509 certificate for + your AWS account. This is used for bundling the AMI. This X509 certificate + must be registered with your account from the security credentials page in + the AWS console. -* `x509_key_path` (string) - The local path to the private key for the X509 - certificate specified by `x509_cert_path`. This is used for bundling the AMI. +- `x509_key_path` (string) - The local path to the private key for the X509 + certificate specified by `x509_cert_path`. This is used for bundling + the AMI. ### Optional: diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index c81e463ec..3eb79ac1e 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -9,57 +9,75 @@ page_title: Amazon AMI Builder # Amazon AMI Builder Packer is able to create Amazon AMIs. To achieve this, Packer comes with -multiple builders depending on the strategy you want to use to build the -AMI. Packer supports the following builders at the moment: +multiple builders depending on the strategy you want to use to build the AMI. +Packer supports the following builders at the moment: -* [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs - by launching a source AMI and re-packaging it into a new AMI after - provisioning. If in doubt, use this builder, which is the easiest to get - started with. +- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by + launching a source AMI and re-packaging it into a new AMI + after provisioning. If in doubt, use this builder, which is the easiest to + get started with. -* [amazon-instance](/docs/builders/amazon-instance.html) - Create - instance-store AMIs by launching and provisioning a source instance, then - rebundling it and uploading it to S3. +- [amazon-instance](/docs/builders/amazon-instance.html) - Create + instance-store AMIs by launching and provisioning a source instance, then + rebundling it and uploading it to S3. -* [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs - from an existing EC2 instance by mounting the root device and using a - [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision - that device. This is an **advanced builder and should not be used by - newcomers**. However, it is also the fastest way to build an EBS-backed - AMI since no new EC2 instance needs to be launched. +- [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs + from an existing EC2 instance by mounting the root device and using a + [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision + that device. This is an **advanced builder and should not be used by + newcomers**. However, it is also the fastest way to build an EBS-backed AMI + since no new EC2 instance needs to be launched. --> **Don't know which builder to use?** If in doubt, use the -[amazon-ebs builder](/docs/builders/amazon-ebs.html). It is -much easier to use and Amazon generally recommends EBS-backed images nowadays. +-> **Don't know which builder to use?** If in doubt, use the [amazon-ebs +builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon +generally recommends EBS-backed images nowadays. -
    ## Specifying Amazon Credentials
    +
    -When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: +\#\# Specifying Amazon Credentials + +
    + +When you use any of the amazon builders, you must provide credentials to the API +in the form of an access key id and secret. These look like: access key id: AKIAIOSFODNN7EXAMPLE secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY -If you use other AWS tools you may already have these configured. If so, packer will try to use them, *unless* they are specified in your packer template. Credentials are resolved in the following order: +If you use other AWS tools you may already have these configured. If so, packer +will try to use them, *unless* they are specified in your packer template. +Credentials are resolved in the following order: -1. Values hard-coded in the packer template are always authoritative. -2. *Variables* in the packer template may be resolved from command-line flags or from environment variables. Please read about [User Variables](https://packer.io/docs/templates/user-variables.html) for details. -3. If no credentials are found, packer falls back to automatic lookup. +1. Values hard-coded in the packer template are always authoritative. +2. *Variables* in the packer template may be resolved from command-line flags + or from environment variables. Please read about [User + Variables](https://packer.io/docs/templates/user-variables.html) + for details. +3. If no credentials are found, packer falls back to automatic lookup. ### Automatic Lookup -If no AWS credentials are found in a packer template, we proceed on to the following steps: +If no AWS credentials are found in a packer template, we proceed on to the +following steps: -1. Lookup via environment variables. - - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` - - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` -2. Look for [local AWS configuration files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) - - First `~/.aws/credentials` - - Next based on `AWS_PROFILE` -3. Lookup an IAM role for the current EC2 instance (if you're running in EC2) +1. Lookup via environment variables. + - First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY` + - First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY` -~> **Subtle details of automatic lookup may change over time.** The most reliable way to specify your configuration is by setting them in template variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. +2. Look for [local AWS configuration + files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) + - First `~/.aws/credentials` + - Next based on `AWS_PROFILE` -Environment variables provide the best portability, allowing you to run your packer build on your workstation, in Atlas, or on another build server. +3. Lookup an IAM role for the current EC2 instance (if you're running in EC2) + +\~> **Subtle details of automatic lookup may change over time.** The most +reliable way to specify your configuration is by setting them in template +variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and +`AWS_SECRET_ACCESS_KEY` environment variables. + +Environment variables provide the best portability, allowing you to run your +packer build on your workstation, in Atlas, or on another build server. ## Using an IAM Instance Profile @@ -108,13 +126,18 @@ Packer to work: ### Attaching IAM Policies to Roles -IAM policies can be associated with user or roles. 
If you use packer with IAM roles, you may encounter an error like this one: +IAM policies can be associated with user or roles. If you use packer with IAM +roles, you may encounter an error like this one: ==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation. -You can read more about why this happens on the [Amazon Security Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). The example policy below may help packer work with IAM roles. Note that this example provides more than the minimal set of permissions needed for packer to work, but specifics will depend on your use-case. +You can read more about why this happens on the [Amazon Security +Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). +The example policy below may help packer work with IAM roles. Note that this +example provides more than the minimal set of permissions needed for packer to +work, but specifics will depend on your use-case. -```json +``` {.json} { "Sid": "PackerIAMPassRole", "Effect": "Allow", diff --git a/website/source/intro/getting-started/setup.html.markdown b/website/source/intro/getting-started/setup.html.markdown index 2151019c4..181f93edb 100644 --- a/website/source/intro/getting-started/setup.html.markdown +++ b/website/source/intro/getting-started/setup.html.markdown @@ -77,10 +77,14 @@ If you're using OS X and [Homebrew](http://brew.sh), you can install Packer: ## Troubleshooting -On some RedHat-based Linux distributions there is another tool named `packer` installed by default. You can check for this using `which -a packer`. If you get an error like this it indicates there is a name conflict. +On some RedHat-based Linux distributions there is another tool named `packer` +installed by default. You can check for this using `which -a packer`. If you get +an error like this it indicates there is a name conflict. $ packer /usr/share/cracklib/pw_dict.pwd: Permission denied /usr/share/cracklib/pw_dict: Permission denied -To fix this, you can create a symlink to packer that uses a different name like `packer.io`, or invoke the `packer` binary you want using its absolute path, e.g. `/usr/local/packer`. +To fix this, you can create a symlink to packer that uses a different name like +`packer.io`, or invoke the `packer` binary you want using its absolute path, +e.g. `/usr/local/packer`. From 54afe10ad10f8fc841d1f4f46a4a5feeca977d7c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 20:59:40 -0700 Subject: [PATCH 685/956] Make the anchor work with the reformatter --- website/source/docs/builders/amazon.html.markdown | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 3eb79ac1e..a85e22d1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -32,11 +32,9 @@ Packer supports the following builders at the moment: builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon generally recommends EBS-backed images nowadays. -
    + -\#\# Specifying Amazon Credentials - -
    +## Specifying Amazon Credentials When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: From e0be4efefef5ce6037d77ad996051b34db3753b6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 24 Jul 2015 21:00:24 -0700 Subject: [PATCH 686/956] Make the anchor work with reformat --- website/source/docs/builders/amazon.html.markdown | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index 3eb79ac1e..a85e22d1a 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -32,11 +32,9 @@ Packer supports the following builders at the moment: builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon generally recommends EBS-backed images nowadays. -
    + -\#\# Specifying Amazon Credentials - -
    +## Specifying Amazon Credentials When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: From 88ebc2f7e8a175c292db122be7e67602b0afca64 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Tue, 14 Jul 2015 15:19:24 +1200 Subject: [PATCH 687/956] Add s.SSHPort variable as the port WinRM uses to connect. This is needed on any builder where the port used to connect is not the guest winrm port but a nated port on the host. Similar behavior is used by the SSH communicator. --- helper/communicator/step_connect.go | 1 + helper/communicator/step_connect_winrm.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/helper/communicator/step_connect.go b/helper/communicator/step_connect.go index 0c1522330..e72be3ba8 100644 --- a/helper/communicator/step_connect.go +++ b/helper/communicator/step_connect.go @@ -53,6 +53,7 @@ func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction { Config: s.Config, Host: s.Host, WinRMConfig: s.WinRMConfig, + WinRMPort: s.SSHPort, }, } for k, v := range s.CustomConnect { diff --git a/helper/communicator/step_connect_winrm.go b/helper/communicator/step_connect_winrm.go index bdd0c1499..44244b37a 100644 --- a/helper/communicator/step_connect_winrm.go +++ b/helper/communicator/step_connect_winrm.go @@ -25,6 +25,7 @@ type StepConnectWinRM struct { Config *Config Host func(multistep.StateBag) (string, error) WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + WinRMPort func(multistep.StateBag) (int, error) } func (s *StepConnectWinRM) Run(state multistep.StateBag) multistep.StepAction { @@ -96,6 +97,13 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan continue } port := s.Config.WinRMPort + if s.WinRMPort != nil { + port, err = s.WinRMPort(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM port: %s", err) + continue + } + } user := s.Config.WinRMUser password := s.Config.WinRMPassword From ef873ba210efde9e92c11fc89c712bd632de7bf8 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sun, 26 Jul 2015 16:33:56 -0700 Subject: [PATCH 688/956] Update version file so builds from master don't masquerade as 0.8.2 --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index b858802e6..84958092f 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.2" +const Version = "0.8.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From e7900ec5ef9145ff52d800b0d0c0d9395a071978 Mon Sep 17 00:00:00 2001 From: Brian Fletcher Date: Mon, 27 Jul 2015 21:00:11 +0100 Subject: [PATCH 689/956] Fix example code for digital ocean builder Remove command after size --- .../source/intro/getting-started/parallel-builds.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/parallel-builds.html.markdown b/website/source/intro/getting-started/parallel-builds.html.markdown index 626033ef2..57b689d7e 100644 --- a/website/source/intro/getting-started/parallel-builds.html.markdown +++ b/website/source/intro/getting-started/parallel-builds.html.markdown @@ -67,7 +67,7 @@ array. 
"api_token": "{{user `do_api_token`}}", "image": "ubuntu-14-04-x64", "region": "nyc3", - "size": "512mb", + "size": "512mb" } ``` From ce54dba2d3b62418fe8c23180f236241a78cac75 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:00:06 +0100 Subject: [PATCH 690/956] openstack builder: log which IP address SSH will use --- builder/openstack/ssh.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builder/openstack/ssh.go b/builder/openstack/ssh.go index 3e7350d11..87a219b22 100644 --- a/builder/openstack/ssh.go +++ b/builder/openstack/ssh.go @@ -23,6 +23,7 @@ func CommHost( // If we have a specific interface, try that if sshinterface != "" { if addr := sshAddrFromPool(s, sshinterface); addr != "" { + log.Printf("[DEBUG] Using IP address %s from specified interface %s for SSH", addr, sshinterface) return addr, nil } } @@ -30,15 +31,18 @@ func CommHost( // If we have a floating IP, use that ip := state.Get("access_ip").(*floatingip.FloatingIP) if ip != nil && ip.IP != "" { + log.Printf("[DEBUG] Using floating IP %s for SSH", ip.IP) return ip.IP, nil } if s.AccessIPv4 != "" { + log.Printf("[DEBUG] Using AccessIPv4 %s for SSH", s.AccessIPv4) return s.AccessIPv4, nil } // Try to get it from the requested interface if addr := sshAddrFromPool(s, sshinterface); addr != "" { + log.Printf("[DEBUG] Using IP address %s for SSH", addr) return addr, nil } From b47eb4cea90a0e39b1d4ff38a5da232401304380 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:05:51 +0100 Subject: [PATCH 691/956] openstack builder: support using existing keypair --- builder/openstack/builder.go | 2 ++ builder/openstack/run_config.go | 1 + builder/openstack/step_key_pair.go | 23 +++++++++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index d6b528695..d15713339 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -77,6 +77,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &StepRunSourceServer{ Name: b.config.ImageName, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 128e36b5b..00f34c9c4 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -11,6 +11,7 @@ import ( // image and details on how to access that launched image. 
type RunConfig struct { Comm communicator.Config `mapstructure:",squash"` + SSHKeyPairName string `mapstructure:"ssh_keypair_name"` SSHInterface string `mapstructure:"ssh_interface"` SourceImage string `mapstructure:"source_image"` diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go index 06bcbf9ea..97dbf7515 100644 --- a/builder/openstack/step_key_pair.go +++ b/builder/openstack/step_key_pair.go @@ -2,6 +2,7 @@ package openstack import ( "fmt" + "io/ioutil" "os" "runtime" @@ -14,10 +15,27 @@ import ( type StepKeyPair struct { Debug bool DebugKeyPath string + KeyPairName string + PrivateKeyFile string + keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { + if s.PrivateKeyFile != "" { + privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) + if err != nil { + state.Put("error", fmt.Errorf( + "Error loading configured private key file: %s", err)) + return multistep.ActionHalt + } + + state.Put("keyPair", s.KeyPairName) + state.Put("privateKey", string(privateKeyBytes)) + + return multistep.ActionContinue + } + config := state.Get("config").(Config) ui := state.Get("ui").(packer.Ui) @@ -81,6 +99,11 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { } func (s *StepKeyPair) Cleanup(state multistep.StateBag) { + // If we used an SSH private key file, do not go about deleting + // keypairs + if s.PrivateKeyFile != "" { + return + } // If no key name is set, then we never created it, so just return if s.keyName == "" { return From a7da0ffde1550b249ac059f10e4b67ce6854b6f1 Mon Sep 17 00:00:00 2001 From: Hazel Smith Date: Mon, 27 Jul 2015 23:07:25 +0100 Subject: [PATCH 692/956] openstack: store updated accessIPv4 from RackConnect --- builder/openstack/step_wait_for_rackconnect.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/openstack/step_wait_for_rackconnect.go b/builder/openstack/step_wait_for_rackconnect.go index 6263bd17d..7ab42a8f4 100644 --- a/builder/openstack/step_wait_for_rackconnect.go +++ b/builder/openstack/step_wait_for_rackconnect.go @@ -39,6 +39,7 @@ func (s *StepWaitForRackConnect) Run(state multistep.StateBag) multistep.StepAct } if server.Metadata["rackconnect_automation_status"] == "DEPLOYED" { + state.Put("server", server) break } From 715662f60b2ae8f251766c92f7d607539cdfb650 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 27 Jul 2015 16:42:06 -0700 Subject: [PATCH 693/956] Reformat --- builder/docker/communicator.go | 4 ++-- builder/docker/step_connect_docker.go | 2 +- builder/openstack/builder.go | 8 ++++---- builder/openstack/run_config.go | 4 ++-- builder/openstack/step_key_pair.go | 10 +++++----- provisioner/chef-client/provisioner.go | 8 ++++---- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 63ef4cd5b..4fcd9b658 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -24,8 +24,8 @@ type Communicator struct { HostDir string ContainerDir string Version *version.Version - Config *Config - lock sync.Mutex + Config *Config + lock sync.Mutex } func (c *Communicator) Start(remote *packer.RemoteCmd) error { diff --git a/builder/docker/step_connect_docker.go b/builder/docker/step_connect_docker.go index 315cfc204..f84d369c2 100644 --- a/builder/docker/step_connect_docker.go +++ b/builder/docker/step_connect_docker.go @@ -26,7 +26,7 @@ func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction { HostDir: tempDir, ContainerDir: 
"/packer-files", Version: version, - Config: config, + Config: config, } state.Put("communicator", comm) diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go index d15713339..9f4c9e7bc 100644 --- a/builder/openstack/builder.go +++ b/builder/openstack/builder.go @@ -75,10 +75,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Flavor: b.config.Flavor, }, &StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &StepRunSourceServer{ Name: b.config.ImageName, diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go index 00f34c9c4..a8b8638dc 100644 --- a/builder/openstack/run_config.go +++ b/builder/openstack/run_config.go @@ -10,9 +10,9 @@ import ( // RunConfig contains configuration for running an instance from a source // image and details on how to access that launched image. type RunConfig struct { - Comm communicator.Config `mapstructure:",squash"` + Comm communicator.Config `mapstructure:",squash"` SSHKeyPairName string `mapstructure:"ssh_keypair_name"` - SSHInterface string `mapstructure:"ssh_interface"` + SSHInterface string `mapstructure:"ssh_interface"` SourceImage string `mapstructure:"source_image"` Flavor string `mapstructure:"flavor"` diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go index 97dbf7515..f17d76f35 100644 --- a/builder/openstack/step_key_pair.go +++ b/builder/openstack/step_key_pair.go @@ -13,12 +13,12 @@ import ( ) type StepKeyPair struct { - Debug bool - DebugKeyPath string - KeyPairName string - PrivateKeyFile string + Debug bool + DebugKeyPath string + KeyPairName string + PrivateKeyFile string - keyName string + keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 498033925..62b3732de 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -287,10 +287,10 @@ func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, ctx := p.config.ctx ctx.Data = &ConfigTemplate{ - NodeName: nodeName, - ServerUrl: serverUrl, - ClientKey: clientKey, - SslVerifyMode: sslVerifyMode, + NodeName: nodeName, + ServerUrl: serverUrl, + ClientKey: clientKey, + SslVerifyMode: sslVerifyMode, } configString, err := interpolate.Render(tpl, &ctx) if err != nil { From 73a157b78d56a50478d4bc5d80d06f700274711e Mon Sep 17 00:00:00 2001 From: Patrick Lucas Date: Mon, 27 Jul 2015 19:32:21 -0700 Subject: [PATCH 694/956] builder/googlecompute: Document use_internal_ip --- website/source/docs/builders/googlecompute.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/builders/googlecompute.markdown b/website/source/docs/builders/googlecompute.markdown index a572a0371..7e6df9823 100644 --- a/website/source/docs/builders/googlecompute.markdown +++ b/website/source/docs/builders/googlecompute.markdown @@ -129,6 +129,9 @@ can be configured for this builder. * `tags` (array of strings) +* `use_internal_ip` (boolean) - If true, use the instance's internal IP instead + of its external IP during building. 
+ ## Gotchas Centos images have root ssh access disabled by default. Set `ssh_username` to any user, which will be created by packer with sudo access. From 21107b0027e431116a7f80af10642a8c5e84d1c8 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Mon, 27 Jul 2015 12:43:05 +1200 Subject: [PATCH 695/956] Fix wrong command type being used when running elevated provisioner. --- provisioner/powershell/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 31ba2b34a..a862ef9b3 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -399,7 +399,7 @@ func (p *Provisioner) createCommandText() (command string, err error) { Vars: flattenedEnvVars, Path: p.config.RemotePath, } - command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + command, err = interpolate.Render(p.config.ElevatedExecuteCommand, &p.config.ctx) if err != nil { return "", fmt.Errorf("Error processing command: %s", err) } From f90f2f685d281d290fa5ae36f602cdb6be8fb759 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 07:00:57 -0400 Subject: [PATCH 696/956] Fix semantic errors in messages --- provisioner/salt-masterless/provisioner.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..439f0e590 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -116,9 +116,9 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } } - ui.Message(fmt.Sprintf("Creating remote directory: %s", p.config.TempConfigDir)) + ui.Message(fmt.Sprintf("Creating remote temporary directory: %s", p.config.TempConfigDir)) if err := p.createDir(ui, comm, p.config.TempConfigDir); err != nil { - return fmt.Errorf("Error creating remote salt state directory: %s", err) + return fmt.Errorf("Error creating remote temporary directory: %s", err) } if p.config.MinionConfig != "" { @@ -216,7 +216,7 @@ func (p *Provisioner) moveFile(ui packer.Ui, comm packer.Communicator, dst, src err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) } - return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err) } return nil } From b88afbf3c9306fd7a82d49727e8a4008b1b41ec4 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Tue, 28 Jul 2015 01:38:40 -0400 Subject: [PATCH 697/956] Revise documentation for minion config * Update link to salt minion config. * Clarify that minion config is a file. --- website/source/docs/provisioners/salt-masterless.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 84171a071..679a0f6eb 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -47,7 +47,7 @@ Optional: This will be uploaded to the `/srv/salt` on the remote. - `minion_config` (string) - The path to your local [minion - config](http://docs.saltstack.com/topics/configuration.html). This will be + config file](http://docs.saltstack.com/ref/configuration/minion.html). This will be uploaded to the `/etc/salt` on the remote. 
- `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt From 769c82b1710dc9665f6cfda64baee78c714ec1a7 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 06:36:39 -0400 Subject: [PATCH 698/956] Support for setting salt remote directory * It is possible to set remote salt tree through `remote_state_tree` argument. * It is possible to set remote pillar root through `remote_pillar_roots` argument. * Directories `remote_state_tree` and `remote_pillar_roots` are emptied before use. --- provisioner/salt-masterless/provisioner.go | 50 +++++++++++++++++++--- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..f308009fb 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -15,6 +15,8 @@ import ( ) const DefaultTempConfigDir = "/tmp/salt" +const DefaultStateTreeDir = "/srv/salt" +const DefaultPillarRootDir = "/srv/pillar" type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -34,6 +36,12 @@ type Config struct { // Local path to the salt pillar roots LocalPillarRoots string `mapstructure:"local_pillar_roots"` + // Remote path to the salt state tree + RemoteStateTree string `mapstructure:"remote_state_tree"` + + // Remote path to the salt pillar roots + RemotePillarRoots string `mapstructure:"remote_pillar_roots"` + // Where files will be copied before moving to the /srv/salt directory TempConfigDir string `mapstructure:"temp_config_dir"` @@ -60,6 +68,14 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.TempConfigDir = DefaultTempConfigDir } + if p.config.RemoteStateTree == "" { + p.config.RemoteStateTree = DefaultStateTreeDir + } + + if p.config.RemotePillarRoots == "" { + p.config.RemotePillarRoots = DefaultPillarRootDir + } + var errs *packer.MultiError // require a salt state tree @@ -144,11 +160,14 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error uploading local state tree to remote: %s", err) } - // move state tree into /srv/salt + // move state tree from temporary directory src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "states")) - dst = "/srv/salt" + dst = p.config.RemoteStateTree + if err = p.removeDir(ui, comm, dst); err != nil { + return fmt.Errorf("Unable to clear salt tree: %s", err) + } if err = p.moveFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/states to /srv/salt: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s/states to %s: %s", p.config.TempConfigDir, dst, err) } if p.config.LocalPillarRoots != "" { @@ -159,16 +178,19 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error uploading local pillar roots to remote: %s", err) } - // move pillar tree into /srv/pillar + // move pillar root from temporary directory src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "pillar")) - dst = "/srv/pillar" + dst = p.config.RemotePillarRoots + if err = p.removeDir(ui, comm, dst); err != nil { + return fmt.Errorf("Unable to clear pillar root: %s", err) + } if err = p.moveFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/pillar to /srv/pillar: %s", p.config.TempConfigDir, err) + return fmt.Errorf("Unable to move %s/pillar to %s: %s", p.config.TempConfigDir, dst, err) } } ui.Message("Running highstate") - cmd := &packer.RemoteCmd{Command: 
p.sudo("salt-call --local state.highstate -l info --retcode-passthrough")} + cmd := &packer.RemoteCmd{Command: fmt.Sprintf(p.sudo("salt-call --local state.highstate --file-root=%s --pillar-root=%s -l info --retcode-passthrough"),p.config.RemoteStateTree, p.config.RemotePillarRoots)} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) @@ -235,6 +257,20 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri return nil } +func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error { + ui.Message(fmt.Sprintf("Removing directory: %s", dir)) + cmd := &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -rf '%s'", dir), + } + if err := cmd.StartWithUi(comm, ui); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status.") + } + return nil +} + func (p *Provisioner) uploadDir(ui packer.Ui, comm packer.Communicator, dst, src string, ignore []string) error { if err := p.createDir(ui, comm, dst); err != nil { return err From 63be0e3ea1e0c2f17915883fff18821a726682f1 Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Tue, 28 Jul 2015 01:37:26 -0400 Subject: [PATCH 699/956] Add documentation for salt remote directories --- .../docs/provisioners/salt-masterless.html.markdown | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 84171a071..19242ae44 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -38,13 +38,21 @@ Optional: has more detailed usage instructions. By default, no arguments are sent to the script. +- `remote_pillar_roots` (string) - The path to your remote [pillar + roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). + default: `/srv/pillar`. + +- `remote_state_tree` (string) - The path to your remote [state + tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). + default: `/srv/salt`. + - `local_pillar_roots` (string) - The path to your local [pillar roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). - This will be uploaded to the `/srv/pillar` on the remote. + This will be uploaded to the `remote_pillar_roots` on the remote. - `local_state_tree` (string) - The path to your local [state tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). - This will be uploaded to the `/srv/salt` on the remote. + This will be uploaded to the `remote_state_tree` on the remote. - `minion_config` (string) - The path to your local [minion config](http://docs.saltstack.com/topics/configuration.html). This will be From eba0e9eaf8fe1c43feba1a7a9ff2e9832a277b7c Mon Sep 17 00:00:00 2001 From: AmirAli Moinfar Date: Mon, 27 Jul 2015 06:28:19 -0400 Subject: [PATCH 700/956] Ensure that `/etc/salt` exists Make sure that directory `/etc/salt` exists before copying salt minion file. 
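(In shell terms, the guard this patch adds amounts roughly to the following on the remote host. This is a sketch only, assuming the default `temp_config_dir` of `/tmp/salt`; the provisioner actually issues the commands through its `createDir` and `moveFile` helpers shown in the surrounding diffs:

``` {.shell}
$ mkdir -p /etc/salt
$ mv /tmp/salt/minion /etc/salt/minion
```

Without the `mkdir`, the move could fail on minimal images where `/etc/salt` does not yet exist.)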
--- provisioner/salt-masterless/provisioner.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index f856ca01d..573e93861 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -130,6 +130,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } // move minion config into /etc/salt + ui.Message(fmt.Sprintf("Make sure directory %s exists", "/etc/salt")) + if err := p.createDir(ui, comm, "/etc/salt"); err != nil { + return fmt.Errorf("Error creating remote salt configuration directory: %s", err) + } src = filepath.ToSlash(filepath.Join(p.config.TempConfigDir, "minion")) dst = "/etc/salt/minion" if err = p.moveFile(ui, comm, dst, src); err != nil { From 223e35fc65d32ff2d4433078c4920ad672a9e64e Mon Sep 17 00:00:00 2001 From: Olivier Tremblay Date: Tue, 28 Jul 2015 07:45:02 -0400 Subject: [PATCH 701/956] Makes StepDownload's TargetPath customizable. I exposed TargetPath as a config file option "target_path". I don't like the name, but it follows the naming convention. The purpose of TargetPath stands unmodified, and it enables a fair amount of customization. --- builder/parallels/iso/builder.go | 2 ++ builder/qemu/builder.go | 2 ++ builder/virtualbox/iso/builder.go | 2 ++ builder/vmware/iso/builder.go | 2 ++ common/packer_config.go | 11 ++++++----- common/step_download.go | 2 +- 6 files changed, 15 insertions(+), 6 deletions(-) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 4a75b0b47..c8982f268 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -46,6 +46,7 @@ type Config struct { ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOUrls []string `mapstructure:"iso_urls"` VMName string `mapstructure:"vm_name"` + TargetPath string `mapstructure:"target_path"` RawSingleISOUrl string `mapstructure:"iso_url"` @@ -218,6 +219,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Description: "ISO", ResultKey: "iso_path", Url: b.config.ISOUrls, + TargetPath: b.config.TargetPath, }, ¶llelscommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 4ac22b59b..8369495d8 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -105,6 +105,7 @@ type Config struct { ShutdownCommand string `mapstructure:"shutdown_command"` SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` + TargetPath string `mapstructure:"target_path"` VNCPortMin uint `mapstructure:"vnc_port_min"` VNCPortMax uint `mapstructure:"vnc_port_max"` VMName string `mapstructure:"vm_name"` @@ -384,6 +385,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Description: "ISO", ResultKey: "iso_path", Url: b.config.ISOUrls, + TargetPath: b.config.TargetPath, }, new(stepPrepareOutputDir), &common.StepCreateFloppy{ diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 0758e9bdd..8a19678c0 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -49,6 +49,7 @@ type Config struct { ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOInterface string `mapstructure:"iso_interface"` ISOUrls []string `mapstructure:"iso_urls"` + TargetPath string `mapstructure:"target_path"` VMName string `mapstructure:"vm_name"` RawSingleISOUrl 
string `mapstructure:"iso_url"` @@ -234,6 +235,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ResultKey: "iso_path", Url: b.config.ISOUrls, Extension: "iso", + TargetPath: b.config.TargetPath, }, &vboxcommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index f2489c50f..c35719dc9 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -49,6 +49,7 @@ type Config struct { VMName string `mapstructure:"vm_name"` BootCommand []string `mapstructure:"boot_command"` SkipCompaction bool `mapstructure:"skip_compaction"` + TargetPath string `mapstructure:"target_path"` VMXTemplatePath string `mapstructure:"vmx_template_path"` VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` @@ -260,6 +261,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Description: "ISO", ResultKey: "iso_path", Url: b.config.ISOUrls, + TargetPath: b.config.TargetPath, }, &vmwcommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/common/packer_config.go b/common/packer_config.go index 2ef86e582..67a8a8efb 100644 --- a/common/packer_config.go +++ b/common/packer_config.go @@ -4,9 +4,10 @@ package common // are sent by packer, properly tagged already so mapstructure can load // them. Embed this structure into your configuration class to get it. type PackerConfig struct { - PackerBuildName string `mapstructure:"packer_build_name"` - PackerBuilderType string `mapstructure:"packer_builder_type"` - PackerDebug bool `mapstructure:"packer_debug"` - PackerForce bool `mapstructure:"packer_force"` - PackerUserVars map[string]string `mapstructure:"packer_user_variables"` + PackerBuildName string `mapstructure:"packer_build_name"` + PackerBuilderType string `mapstructure:"packer_builder_type"` + PackerDebug bool `mapstructure:"packer_debug"` + PackerForce bool `mapstructure:"packer_force"` + PackerUserVars map[string]string `mapstructure:"packer_user_variables"` + PackerIsoTargetPath string `mapstructure:"packer_iso_target_path"` } diff --git a/common/step_download.go b/common/step_download.go index b8bd60b5e..458117bac 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -71,7 +71,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { // if we force a certain extension we hash the URL and add // the extension to force it. cacheKey := url - if s.Extension != "" { + if s.Extension != "" { //HERE. 
hash := sha1.Sum([]byte(url)) cacheKey = fmt.Sprintf( "%s.%s", hex.EncodeToString(hash[:]), s.Extension) From 97e16aeed9ba4d0ba0df18f20085988b3c6d21a7 Mon Sep 17 00:00:00 2001 From: Kevin Fishner Date: Tue, 28 Jul 2015 15:04:19 -0700 Subject: [PATCH 702/956] add updated analytics --- website/source/layouts/adroll.html | 17 +++++++++++++++++ website/source/layouts/layout.erb | 1 + 2 files changed, 18 insertions(+) create mode 100644 website/source/layouts/adroll.html diff --git a/website/source/layouts/adroll.html b/website/source/layouts/adroll.html new file mode 100644 index 000000000..bc5b32c40 --- /dev/null +++ b/website/source/layouts/adroll.html @@ -0,0 +1,17 @@ + diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index f66adb067..c809b6b55 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -71,5 +71,6 @@ <%= partial "layouts/google-analytics.html" %> + <%= partial "layouts/adroll.html" %> From 8741a6df2374b97a2baceeaf2c903203eb924982 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 15:29:20 -0700 Subject: [PATCH 703/956] Renamed .markdown to .html.markdown to be consistent with other filenames --- .../{googlecompute.markdown => googlecompute.html.markdown} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename website/source/docs/builders/{googlecompute.markdown => googlecompute.html.markdown} (100%) diff --git a/website/source/docs/builders/googlecompute.markdown b/website/source/docs/builders/googlecompute.html.markdown similarity index 100% rename from website/source/docs/builders/googlecompute.markdown rename to website/source/docs/builders/googlecompute.html.markdown From 0c7654358aa9ded65f103d0a4d805f0e2b9da643 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 15:30:32 -0700 Subject: [PATCH 704/956] Reformat --- .../docs/builders/googlecompute.html.markdown | 148 ++++++++++-------- 1 file changed, 80 insertions(+), 68 deletions(-) diff --git a/website/source/docs/builders/googlecompute.html.markdown b/website/source/docs/builders/googlecompute.html.markdown index 7e6df9823..56fdafdcd 100644 --- a/website/source/docs/builders/googlecompute.html.markdown +++ b/website/source/docs/builders/googlecompute.html.markdown @@ -1,36 +1,44 @@ --- -layout: "docs" -page_title: "Google Compute Builder" -description: |- - The `googlecompute` Packer builder is able to create images for use with Google Compute Engine (GCE) based on existing images. Google Compute Engine doesn't allow the creation of images from scratch. ---- +description: | + The `googlecompute` Packer builder is able to create images for use with Google + Compute Engine (GCE) based on existing images. Google Compute Engine doesn't + allow the creation of images from scratch. +layout: docs +page_title: Google Compute Builder +... # Google Compute Builder Type: `googlecompute` -The `googlecompute` Packer builder is able to create [images](https://developers.google.com/compute/docs/images) for use with -[Google Compute Engine](https://cloud.google.com/products/compute-engine)(GCE) based on existing images. Google -Compute Engine doesn't allow the creation of images from scratch. +The `googlecompute` Packer builder is able to create +[images](https://developers.google.com/compute/docs/images) for use with [Google +Compute Engine](https://cloud.google.com/products/compute-engine)(GCE) based on +existing images. Google Compute Engine doesn't allow the creation of images from +scratch. 
## Authentication -Authenticating with Google Cloud services requires at most one JSON file, -called the _account file_. The _account file_ is **not** required if you are running -the `googlecompute` Packer builder from a GCE instance with a properly-configured -[Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). +Authenticating with Google Cloud services requires at most one JSON file, called +the *account file*. The *account file* is **not** required if you are running +the `googlecompute` Packer builder from a GCE instance with a +properly-configured [Compute Engine Service +Account](https://cloud.google.com/compute/docs/authentication). ### Running With a Compute Engine Service Account -If you run the `googlecompute` Packer builder from a GCE instance, you can configure that -instance to use a [Compute Engine Service Account](https://cloud.google.com/compute/docs/authentication). This will allow Packer to authenticate -to Google Cloud without having to bake in a separate credential/authentication file. -To create a GCE instance that uses a service account, provide the required scopes when -launching the instance. +If you run the `googlecompute` Packer builder from a GCE instance, you can +configure that instance to use a [Compute Engine Service +Account](https://cloud.google.com/compute/docs/authentication). This will allow +Packer to authenticate to Google Cloud without having to bake in a separate +credential/authentication file. + +To create a GCE instance that uses a service account, provide the required +scopes when launching the instance. For `gcloud`, do this via the `--scopes` parameter: -```sh +``` {.sh} gcloud compute --project YOUR_PROJECT instances create "INSTANCE-NAME" ... \ --scopes "https://www.googleapis.com/auth/compute" \ "https://www.googleapis.com/auth/devstorage.full_control" \ @@ -39,38 +47,39 @@ gcloud compute --project YOUR_PROJECT instances create "INSTANCE-NAME" ... \ For the [Google Developers Console](https://console.developers.google.com): -1. Choose "Show advanced options" -2. Tick "Enable Compute Engine service account" -3. Choose "Read Write" for Compute -4. Chose "Full" for "Storage" +1. Choose "Show advanced options" +2. Tick "Enable Compute Engine service account" +3. Choose "Read Write" for Compute +4. Choose "Full" for "Storage" **The service account will be used automatically by Packer as long as there is -no _account file_ specified in the Packer configuration file.** +no *account file* specified in the Packer configuration file.** ### Running Without a Compute Engine Service Account -The [Google Developers Console](https://console.developers.google.com) allows you to -create and download a credential file that will let you use the `googlecompute` Packer -builder anywhere. To make -the process more straightforwarded, it is documented here. +The [Google Developers Console](https://console.developers.google.com) allows +you to create and download a credential file that will let you use the +`googlecompute` Packer builder anywhere. To make the process more +straightforward, it is documented here. -1. Log into the [Google Developers Console](https://console.developers.google.com) - and select a project. +1. Log into the [Google Developers + Console](https://console.developers.google.com) and select a project. -2. Under the "APIs & Auth" section, click "Credentials." +2. Under the "APIs & Auth" section, click "Credentials." -3. 
Click the "Create new Client ID" button, select "Service account", and click "Create Client ID" +3. Click the "Create new Client ID" button, select "Service account", and click + "Create Client ID" -4. Click "Generate new JSON key" for the Service Account you just created. A JSON file will be downloaded automatically. This is your - _account file_. +4. Click "Generate new JSON key" for the Service Account you just created. A + JSON file will be downloaded automatically. This is your *account file*. ## Basic Example -Below is a fully functioning example. It doesn't do anything useful, -since no provisioners are defined, but it will effectively repackage an -existing GCE image. The account file is obtained in the previous section. +Below is a fully functioning example. It doesn't do anything useful, since no +provisioners are defined, but it will effectively repackage an existing GCE +image. The account file is obtained in the previous section. -```javascript +``` {.javascript} { "type": "googlecompute", "account_file": "account.json", @@ -82,58 +91,61 @@ existing GCE image. The account file is obtained in the previous section. ## Configuration Reference -Configuration options are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +Configuration options are organized below into two categories: required and +optional. Within each category, the available options are alphabetized and +described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: -* `project_id` (string) - The project ID that will be used to launch instances - and store images. +- `project_id` (string) - The project ID that will be used to launch instances + and store images. -* `source_image` (string) - The source image to use to create the new image - from. Example: `"debian-7-wheezy-v20150127"` +- `source_image` (string) - The source image to use to create the new + image from. Example: `"debian-7-wheezy-v20150127"` -* `zone` (string) - The zone in which to launch the instance used to create - the image. Example: `"us-central1-a"` +- `zone` (string) - The zone in which to launch the instance used to create + the image. Example: `"us-central1-a"` ### Optional: -* `account_file` (string) - The JSON file containing your account credentials. - Not required if you run Packer on a GCE instance with a service account. - Instructions for creating file or using service accounts are above. +- `account_file` (string) - The JSON file containing your account credentials. + Not required if you run Packer on a GCE instance with a service account. + Instructions for creating file or using service accounts are above. -* `disk_size` (integer) - The size of the disk in GB. - This defaults to `10`, which is 10GB. +- `disk_size` (integer) - The size of the disk in GB. This defaults to `10`, + which is 10GB. -* `image_name` (string) - The unique name of the resulting image. - Defaults to `"packer-{{timestamp}}"`. +- `image_name` (string) - The unique name of the resulting image. Defaults to + `"packer-{{timestamp}}"`. -* `image_description` (string) - The description of the resulting image. +- `image_description` (string) - The description of the resulting image. -* `instance_name` (string) - A name to give the launched instance. Beware - that this must be unique. 
Defaults to `"packer-{{uuid}}"`. +- `machine_type` (string) - The machine type. Defaults to `"n1-standard-1"`. -* `metadata` (object of key/value strings) +- `metadata` (object of key/value strings) -* `network` (string) - The Google Compute network to use for the launched - instance. Defaults to `"default"`. +- `network` (string) - The Google Compute network to use for the + launched instance. Defaults to `"default"`. -* `state_timeout` (string) - The time to wait for instance state changes. - Defaults to `"5m"`. +- `state_timeout` (string) - The time to wait for instance state changes. + Defaults to `"5m"`. -* `tags` (array of strings) +- `tags` (array of strings) -* `use_internal_ip` (boolean) - If true, use the instance's internal IP instead - of its external IP during building. +- `use_internal_ip` (boolean) - If true, use the instance's internal IP + instead of its external IP during building. ## Gotchas -Centos images have root ssh access disabled by default. Set `ssh_username` to any user, which will be created by packer with sudo access. +CentOS images have root ssh access disabled by default. Set `ssh_username` to +any user, which will be created by packer with sudo access. -The machine type must have a scratch disk, which means you can't use an `f1-micro` or `g1-small` to build images. +The machine type must have a scratch disk, which means you can't use an +`f1-micro` or `g1-small` to build images. From 1420e2494cf7014c31df6c4914447c21ae1cea1d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 16:24:43 -0700 Subject: [PATCH 705/956] Added note on 5GB upload limit, and workaround --- .../docs/command-line/push.html.markdown | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 06e5a3c98..140c996d3 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -67,3 +67,27 @@ Push a Packer template with a custom token: ``` {.shell} $ packer push -token ABCD1234 template.json ``` + +## Limits + +`push` is limited to a 5GB upload when pushing to Atlas. To be clear, packer *can* +build artifacts larger than 5GB, and Atlas *can* store artifacts larger than +5GB. However, the initial payload you push to *start* the build cannot exceed +5GB. If your boot ISO is larger than 5GB (for example if you are building OSX +images), you will need to put your boot ISO in an external web service and +download it during the packer run. + +The easiest way to host these in a secure fashion is to upload your ISO to +[Amazon +S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html) +or [Google Cloud +Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl) and +download it using a signed URL. You can inject the signed URL into your build by +using a build variable (environment variable) in Atlas. Example: + +![Configure your signed URL in the Atlas build variables +menu](/assets/images/packer-signed-urls.png) + +You will also need to [configure your packer +template](/docs/templates/user-variables.html) to +use the variable injected by Atlas (or via `push -var`). 
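A minimal sketch of the workaround described above, assuming a template whose builder reads the ISO location from a user variable, e.g. setting `iso_url` to "{{user `iso_signed_url`}}" (the variable name `iso_signed_url` is illustrative, not part of the commit). When pushing from the command line instead of letting Atlas inject the value, the signed URL can be passed with `-var`:

``` {.shell}
$ packer push -var "iso_signed_url=https://example-bucket.s3.amazonaws.com/boot.iso?X-Amz-Signature=..." template.json
```

Inside Atlas, the same variable would instead be set in the build variables menu shown in the screenshot added by the next patch.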
From 6fac13868b5e5fc47cd98719197f32450c7e3a5d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 28 Jul 2015 16:46:45 -0700 Subject: [PATCH 706/956] Added screenshot for Atlas build variables --- .../source/assets/images/packer-signed-urls.png | Bin 0 -> 40501 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 website/source/assets/images/packer-signed-urls.png diff --git a/website/source/assets/images/packer-signed-urls.png b/website/source/assets/images/packer-signed-urls.png new file mode 100644 index 0000000000000000000000000000000000000000..4e9e17010d7d44c0b4cb21e62ccccd116ea8a4b3 GIT binary patch literal 40501 zcmd42Ra9JC7Viy&KoJ~*yCfmF6Wk#{f(8%n?ry=|-7QFPC%8*+celdb^=`WRoRdzE z@8Ns6V_Zf(6nocRI@erl{^oxNN=u3&zQTP40Re$1_EAU{0^)@+1O%ia2nM(^F5M&p z0r9H9NKjB(Oi++m+S)?j$W#vk;$vWJ9PAf4Bg~$bt1&zpk}d>-rru{eM5li`=Eq^5$o29W`DJx!tJtLcjFFD zyEA`0ayjAzBSYMfs8W3L)Pwklp&KC3xQHz?%7CzN1OZRX_hO6BZ%_=2(cAl>ZMnM# z_9Gpd!)JZ{2e)MpL}gmK)E5x1LoD&xQlo>8ameYceXidN%W8EpGdiV!Q- z>Q|poIk|EDYx4G6t0%^HtIPV`V)_-caA;{RakH&1$K$^5_p6`4_V-60Kcfym`M-_Y z5Q>xnRg6V)7m7)qMVArN(i^7TEX+PCOTNDI8D@I7$gQ7NXNuh7g!#^&37kly@-sfY z?NzdYj7oqm58Wcu7sm9S(|E&=a50u#%fT$evw>QXXDPqJK`G$#4e1tNLI*K5a->MZ zFf1prGzJzoemwuC3=%Si0Rw*37@-&O;MMm(;M?Ckv?H+n`lKaZ_B}m*jpj$5fuH!{ zARkYelce??4BY@++&=O#L|Q9R9X2t(5`^jowekxgc^Heq0XD>iaUT=md!J4O7<6_P zE*Obc1{Qs#ghbUB^foYH^^-gQN(1<;bu43}MTnUxQt2jXogSzWuYJG9DS^*mY=|)r z2usfn)L=%i@pd8+0x^`)hAkBoOUBD{Pq?gMm=92jLgjQIC@o^#B53>wkgO+2*YiRX zzW2pT&37~KeS9xJKpD5fS-xWSK3ie@31jYQZb`%i`Jf4Egjru9OYkBF!O9R*eh2*! zqy?8`SQO&tRPN6b(~IQI&JuS*)o7Q4XL-E)XPJ`+zG`5c7%O(fG{ z1Pl9Rv=(mBtG)JMEzShEyEd)kS05nN5e+)(j*))8F7XNP_SH_wQ>4eA^o&sq3Kx4k@EDjk5nTLFa2?J)#&%#V2Zw*DOkU8)N z`AbDCgs`&uM_9Js+QQz_Izw=WJ%(j!exVn^hDCr!_Z4f^)jFkrlR!D3JRm>dHh{84 zh8dirb5T~KL}bd$3fF+78o(W3)>)(FQszAahmVvOz|_vZ>SbyA6^0hMmdF9$0d6`N zuidRZ`gqq7z7dNDhOQO7B5`bY9ek~RPwyT?EI2~)0mTdSwOvI3b(JDB*3zF|D1o#O z?IJ+3lU^Im3cfsMhObfFRoqpIG?H2D@_P0PjO^;QH#I&e`%j0 zS+mrI-Ur;L31n`|etpjngV2X0PP6%HlYKM#wS3=~pf_EybL8^0ljM_B7a~c-#bkpq z>hDu&)JWY_Zo|$K5cowDyVd+?DrIuNZl!!mAMl};pZd!BkyXA%x<>NSZ$AKEQc`h9 zE{rOk%0cKbXn`0*crvSAj(wtdB7356f&@1%S5#*-%J5|sdKK;5&&HI-SXZBW+#Bl~ z_|pg5*J|-YS$QMD+j-l_+ZvcZXtinAXy%&pwPcR6#9r#XRD78x&@7PM?&eDugdi-{ zrQ4+zG#E4-Ga~LME)iQ1gY;fa9#<~+Jtw72%vr3|dtL=kc?5ZZFH=A8W$KCmO2aDeF`nvDv0}DBYKcEm(L-;sGM5bEcy_2OXbJq{p+Jv3?m2jIxZzH53Y7- z2d)~fT#|FrY7%==Xwq<1fQr4!iHb|rTy=N#Wc7=xX_ILa-YJspnH}=o{qdcN#EHrA z(22#|`6AA&yo{c){;9%gmjbC#hf$4UjUOJv5GI-i5vGYF)$IA8_CXgzumjEE*ZakRIqI(c z{8@Axb{mZ|{4<3!2TVUq{9x-~m$=swbp4;>XyPo2Ly8;~{EG-x*QnX_&RB;I(*~ZR;lKSdHn>2zwj$E6gs8OyWH5 zI1XQpar^sDr#;91#L<9dnBC)S-Li0p?}6;jX}f%X?4HUd#{jeDTZV9^<)4XlfeSpTqMzlmMtXgH@P*$M%N0|@x;jO= zr@GwrV9Tjy(XEPpzOY)ycNpRsGAW^QKz4`$cT#O3_HcG#^@Njuc(X5w{n=KGS`2@^ z;jI7KBHA^IGU{!#cT{0?o*bzhuH4Cw@*iMmYcE}X(Il0*q$a#slnbTHN z)uq!)EqV{FFk5~dc$uMAc=K#VW_^pyRkJQOgP3b^8#FDO37fCMt~0oG)hvoE4mO7? 
z$V)7u($^TKP?2ePN0l7qPUD~qSJ;lUPVO$BM$Ea$qOQ!(Hm} zOZ^Vqhw2SI^$vUPD-UU~tH^|xG)H?Ly>1^)DK%h<4URm^R?l86jcdYnrg7zF ziMz_%PlsLFZec%*-_xk=$?o=F3R;qEp>i#~Zoc)N^uG*T2?P_#@k)N)@{qm%ayENt z1CH;d>yT5{u+ord!ErTtIJ|n;vE4ndilEURR)KsSW$DTVaZ~`AA@qVz*!881@etWC z3^RDo0Pdb(_-1>y3^HR1;~P#P=C>l)NO>&foJa%_#sQ4hxVWf$gYP}6Dg%S0EM!yP zb)9dy*AT5>AYAv7)6$I7(jFCj1Kbe!=6l#Jc+eRGo{qK>g_#~)Y-`P6o=~44_yeIW ziet>Dfg^0H(HA9KrO%%@bS%thwRA1C^=KWx*45{b6%zT+8sho>zw`)88Q=_#niD_`f(p?x(_P;&;vZwM1_$x%9BY=p<{K z7BEa!jYtT<(V&1b-#~*%$Vp z8>hOy+j&@Nbl~LFOXofqxxzQ^yjQ#@8#BylxBDfTUdzj@cF}&S)?%3>KHdyc!|fur z>F%(h=HYT)HJ!)JSkL9*V%n_r8{|}j!;y;7c<$%Q_j0*2<_!l5R5bNu%SBmw(_j~9 zp7v%N@X-6hL>7}c7K^2YdfOe@^@PbnMLgG^am6at1(BE!WtweJs@2BwvdyFGL8}jq z^75GUqcN(RNV=crXz^95M;kjhL5dZ>4QHz}8imp}nR->j zVSWGORm&4j>SieGT(AAC9v0_8)%gB`&2V)hlfm56!}1f(S|)9c<*Go#MwG;UqCwgZ z35H3hWtSsvjiq<#uCh%Qf{lqEtg=e4#-(_RsrKDVah>=OIB#cO&M!aKbTc?CIprOc_Ym4I zSoO>%FzDQ06=!8-bu+l~ZA3jCcOaL>Dui&-CM#*Rcr>&x-M&pLcVcss)pC$5a2q07 zH%phFIXHZS{@c{fltI7RlIeJHegjT-@np5$_&9IfPg5)$jQO~~Lye5X*y++v=(K^l z9Jy4cgsWhNXVF-G@N+%vF?Qw@XiSg(K!O%5k*lS)EI$v~P~9QtQKL=ju12MPsG93Z z*W}X77hjSJ+NN{a;4WilA9csomkqbOg=W)g4%OLV9DQldyYCy;!#ImnYt0{RD3IxJ zp%QMbt4~j@RU7QhRHW2O^z?1GmP%zE>XWNVorY#NCK-NTC|&#+#DvwcREl#ZrA=EM z*OELpy7Tt)W`)AIO5zd{h(|-}E(e<(W7{(ZIv_5NEyt!fIt_Iy__PLOp6k};$J;#> zBZ)X_hDL?)JgL%H5uUNAYVX_pU`eMBFmwtNATasCdN^-Qeve<{M1S+uvd6N&TU5B4 z4||llvN8@$LAT4Cc=BA0nTbKFUECGYm!e+y&J!XUb^P%cKBM$g)HGHLPh4L3Y%Z6k9-&OJIJ;sC>h3q2>fcjs z$HW4~CeyupN%!pvi)#v&hVdGW5D@MGqr7rO(NSx#ztE;}exPmGXXJG_{q_PWKaw{Y zIfX23b(=+Mpms`a>Al1tW=Tn6yTPTu#aKnPJ`rue{C8oiLIh5=IHRG}#)%yHpKe5S zA$UDz%T1l`%taG(Y#NIC1^26cE08^8YAc2FY7RUyuWd+l9OcbazD=|y18Oou=HMZJsW)FZUN^WEX@&@HB_sn!!P zP;Pg<4L4iK^9yx0O#yd{NABROr&VN+-F$joiO%p z7lOp`s~+CR^4ky4%fWQbhFEI0G8fJ0Qe2}s^Bd)upt7fpTE((o5zI8y$Bk%nx@_5{Ag`R|l z$Qo}7tU%h)C{67`RKh;*)5DR+l^2SXatypc$lj|vRva{z<^-IlR(T*pTeo#oT_5Qktw+8S{BXo?X_h-W{ ztHM7Cz}Nj6k7Ewk9>?B9GyX*GhT*)Aj&`U4_&!U3?U>NL)*Q_iMaJW_FB@6^DXHr=5I(OmUz`g*6eV)2 zno21*?FWBISPlHFm%?e6;_-MhRjSc~>%WTHSvdVeE)VyvLz^6d+;tPv!jmDb%I$JC zuiI+Yy>w-m1X?XJJq~l8}t-6j92o`7fkqZMq;q===HVCI2~Qu*e$O3Eanq3xJ*y-+Kb4m zX6ZF;?bh`NR(Zq3%y?YVR8on=&t2D9-5te<)4fKG*17H6_qdClePZ`y&z0ggryG5{ z*YwX?XI6rGxg)mPm606+(y8Y0!E6thyed0#1#`Ne_>EZQRwfT6c>DV64s%~s5!lbP z%~hL#IcGo>n$%ysdfNqk-}!OCc?(Tw+0LcAo{G_7J4tP8f%bH&g%m=*e{G}<^s9xo zz@&mb1SXV+-A8(@PRAYF>kTM(yV{FX# zW*;B!cC=Dm+PEP38oe^cH;pjJg!^5u(^xuDr_oP>^3aCK3G+U6+xe|Lju zy#hA^HMim6qN-oP>r-t_9?QPE71rcS{OyG4b_Yj?m?QWTJR1`Ve=QMUp%5!Vy@YcU znh7D|U4p0bLC*@1bXBrX3*%ZsYX@(y_}|F?kfe27NzjQ2^+n-@hPJ6fLFOlqO3%4M z709$nHLKgmIZ)a@uWTntd()^HAV}@=bkgj0cgez~U-aqY4Ebt8BM9|=vedmRa|zVe z!6|sN1C4!QWy-uk7Zcgf+D94hx;it`-AcAUY#$u)CWR3@ctDpClbU~n@m&N40fP_I zbmDOBvU>wE1LAVBv^vP5OYjxvF3-h;oExLjSKd>yHBj&Lz@6?ehDf zbszLvD1TK?^8~TI|F{ITTVz}F*mlGgGmRKJe1t#kN8C{R*+5oWfo%`0j2}Hdz2-M8 zpMi!~S>zjtHbYsN@pT}iPPDU1B(7sVx`bo*6Q0ynU+p1`b}uJu$F2l_%n*CVs}N=Z z){d{`5rj1xI*$r#3p_r$1@w&(Z1tfPud=A(??)@5nufo;CB*@3)9>f!^C=qj#u*nP zhh;a17CkFeLK74&H%EtM_d7G9w}Dq-PI{vzgPKq_(;}g`o7`rzRsKix7mON=3?z2pLm?U3lU!DSlXtg7YkQ}5~OKruq~llJ(z@;}Chi|^@mo^@>M zdeyLe5p0Sw*P~6^hK>Ou{Q?T#xRLk4&t>8`1CGJbO{aZSOD?{AT7=jVkLv^-N*O-B zEZbbm9#0SI_)#LDr4D4?eXvA%*-N{qcxe#XGx&6JXZ(}brx8b$!#oWN+#r4- z*vweRv%@-Yno7@iZQlg`4@|JgQWQ{eH?DQqW}9;r0dO`wD-Aq)Fj~G~gpTzt@vH!| zxo#@sqWVAzgskwck0|;+k)-k+MVu1A|kMzye`+K0gR=yr*>hh`w6YSE zr~kFs>)0Q-A6-xosHFvZ@dY4J6Z=qzQh34b9kD0Z^vI#Y@6kdrXi!zC>4-YTKlnL0 z`0Tty}G&D5}a`e9L{8%a9UxbS!P%txXQ0M^=z#;WJE6Ee;hzU5DU0mmB4^&QcGXG=+X# z6h6eBL7h6x$IYEqTitbVVq7YMA;0dVxV)I}3MuP-FR^xcW`%LN9^dznQk`Dk^;!4# 
z7-CP9c#GqF?`=wv298~y?+2&7;@bPlrUdlvuY_A=qmh#5AE0rwULs3nt>*Mq<%?;j9pgwL>iN;Pl<`htzU4BZU2M$#;xy|4ly!UKuB8>N z+e2Ct1=nh6%AxSR&B@Ivf@oMT#zw6l;Xpl|W}17_LEEjbFr15v3Xu*Y*uE%SFFGBZ zU0s}{omd`(A(>IG{f0_hB=kWkK|}FE>?!CsGO!_@u_8b-4YTL?oAdiuh-bJDJ!RwC zT&^>E^Ct)4TLpd0#|UdO#{4Z9e2)1&zXL!bN_$lD&%!A10rb#bt~mJn*gb!bi23HD zB1=8@{x=K$7m4sCBhFO#a9Qx1clq}*FbXX~S|Ui8^Ph9!e@=&%K;5r;)LDXm>)ta) z;gbbei&(i~-ajXif76xMVL;uhGllwpw)Ueqp8(B!%!Ai|cCY;z&)~aEQvcanF}RpF zi^=OlzVKLE34;Fr(}Z>M5#Z>?k*-92STvNA5AkP>D4va|(%U*q(gnnS-s(jLhVI_- z%lrQsIyRtRPJ%zY{&PJ3`bE zmJ=`yacnV^AOCUk|E}&-5Evs~eEFb1M|C3<7}ft5J0cxd8ih&w)i4-~u7<^AYNPOD z?05i(89+fCw%eMi9Cnm5%JK>db1CdLrZ=aX-7dd}soy{w(R>Z|2SuXcgbK2<4j#`l zSrRJvy;!Ndz25%7m`0^4-^8kK83b3P-ZZy0m~2q2T1%Utl+13Edt!GqQ=SdfS%fM( z_5SzO^g4*o$*C?g7{Cq1ilypy<|2zZX(mu<%z;%L8o?rJ5Kw7HpygNroHS-GXEY<< zdZ+@$e`W%g&aCauc#Ygo4$%yOiFfA=XY`R}v_#q*Z zLb?ht!Jx~re62sM_H!&yl3dd%hsA%}xo6fJV7*txiy;12lOa(J3!sBeGi3`EiYu(v zyK$thmRydSl6R{|o&H#4%ra2IV42{ab$h^V&h^u^n0mt?_U%?gfMXTa%K`eryK_S1 zew|52H|;;%1i-_~Ub1lP&AiHYX^!oO0G0ag?|R$@YkyEZJ=g9FADwIRKMsQ&je;Zd za*%C#CIpA6NFtuL=A;`>Ran^fs^kqT*DX6{@a*-;`awZqNyW_>STx#nG)wq$*yC}0 z$$5{4>iw?+TOyBpE0^<8Q8yfxSD&ws0d6+m8HkR4so(~vT53qH6-vG)GOPWMZzGQ6 z8};R>F&gi#MQ!tAGimdBS)=<44Ufh6i}t*U>k~|cCzaVBs5I=AG~5$+ z^TQIvm|VWK;=1~#*B|F(;d*tr7@fv6eE>vOXWo4C<_*PwF;nkY&F;JI{>a-0s;hZs zA1VOD{fyagJ|^L|xS+w|aG1i3{6Yc<=TZ;}I3|z)QuDp%A2-am{#M#nAhga1TPx_O z@hohWhV!(Sj7ioPH2u~=^VxYI1 zGUlb~O#-qP_P{E?+=!N%t1=pQb-9|i;N6?8%#-ps8)Rz&hu0tz@hsRcIe~Z2@@e@j zg8U%7K>$92@tBdG3my*#iKx zTfYx(JX(QM9aNT^-J0?duROQg*cNT07XdS7)og`m04yDe1C zKO8z#_Ju>N6I!nYzRTru-<|m32io`mR~?fO^B1WPP2w{z|8^1J<)rC@+YF4lNFJ<} z{q?H_Fvkqw-gdjt&$hEx!=8QTGers(xYPiUc8&gE z67E4ane|B*)*byIiJ%{L)&XGfl~>qfTYyz89aD_Ky@`pIOe)es;Y%QL*)L~NPi$>DPoTLa3zMiw?^dscF%Y9KYbRzDm7Do(AY0@6qHOzyKfOP%ls} z1N`?io^Io@%wo80wm#`(M&^$?;c+`xEem7I(+d%MA@c`(6X1(-0DQ-#|A=x&gBP)` z`=U28kD)DfwqH+3grq!PjPy!+k3fnv*TES9$;JiS0Fz|uvX#^Zi!G5*qkiIIT9oBOj#4MSJYAyhS>Pk)7-plEQNT*aL7X9`{ z5}%NHE3V3Tl9K$!tbkp(3O&=;G266LZ(<$T$`lrr_m*2)=z@{7CXNrao^FOc3Vf9k zoaHikoB-T) zOI^k8#ADSTz&4pKN%a^O;JqI_sU2LVAnZq1QzG?#uk_)G`*NnkKY0lt{O*MKpN=Ee zKR@kAdF-B{;4EHWY8XQ53 zi9xavxOoc4ysku1_QD$($O+txnKNm>&(c7 zFp)COX7Z%cRc@Y+{dupEC~Ri7ZP5vpm(`P0LPx?bNcpg-2#z!zO^<(S2D}n-2N~py z<14%pE`o;isr9GV;F!Yph774M-F!&^mA?9hG30|;`G~F4KQ#XkXC2E@M)=>bX%hlmQ*NZz89^S{3aJQT94=3Rs=Lf3` z33x%R{3#<_rt__J3}s#?Y%eXY$(ekJ;p+xcIj6hB2viByK%zr0;Ng)NX1plKsFN%W z`!;y^-2w!Ms8V=IMpu1TI#q`=P}Tx;%6K1kq|}wjpz4OGSv!LFBCkL7ra#?-)l+PU zbTeK-dfaTrD-P#fycBsPF-^XMTBhQ<`N}!x421D>nf`>krHEdUWl}Lg?1c%|<1Z9a}W2S)=4wD)6f*5w)`e|yf!w3m9 zICev}{4I+|M#JMWue!um&c}gc3ImsJBcbDprwU?@&EN_|Di}!Z-0^TbI<`}W0m#7C zjt2(+I*ink5$=-M6`b9=!y2cWp)bEIjkXgV3Rl@Nu zA^evE2XztJ*0)IaE(}G-NUskaE<$(LcY>`iQE~dDreRKDhEcDAfF(We4lo$sB9@v3 zsh=>|XX@Ugv_p`k>}mE0gn->vs9FDd^O$6{2#VE@@E5faGCSS))%^f{au2kUGwI7i zno~8fXQf@~j){EG(xNrJ@H|9J<5j2;c5U;i$x&JG+3U9o;O*1S2;k2n+V66u}jLnKUYQK2uMBlwG@oFw>=s3smWBd{ze(*PdDM>j( zWLsFdTJ5q}thcKmt(N-hq@B?V_nP@Ua&(m}>;vghfR`d!xh98_qPkNKmaJ`98H&Ci zmY3p$z%}L_NHuG`Ki^^PEDFQ`@Aoly$c0RUc_VPtTnLv$cVHdP#wj6Lz#oea+Dy>0 z4;RVPW{CZr9+-Pp*(Z!^`5l61>lJ1=hBi!QPl_;C<*zNaH*q+r{$jX>Axd!^Na@aZeC8?XfN^N}Oz3L*FAB95-3FRvT5 zzwpm*E9_pT1rIF0J$uL!`S3Q4M&LE7ftLAuDj|{3mj2F`2SbjdlTNFL<1nUobTPu) zIvnt4tJil@Uuoi8J>Li*mAXx|SLcOfhx5$wI)59nP#>7xL`B6t4@4XPi`W`sL%*V& zc;y)oriWrwH$}t|zH=%ptWv)oWp}-OJjM{u@)^oe$=oj7ZNqEj)TRFDRsK`D;_j2?udCGwIjPUe_~3R|76DnO zub^Q+@}g+^kADZ1d>y3HBO^V2zT(9gp0{sM8u_Tb_^+ujw@NluY(Rs0G9kpW$5TE| zS|r@l);8>fo_^ASene5}mt|4fC76xFfoFnpv4SA7PlbzC1Gm`=X$$8G*nvP=)pYfGi#s!w#NCGtM`=F-IP%cvT(eYVPU`nc&TQyT^xFc? 
z$T}k|dpk!4Y-35Cm6FoyQhYEfdS5MNfAPD*PopmK{7JI^!OD|pD^%NuvLcC!u;$_< zS4ISome_QHa>#ucX0sUagUhd9eL~qv!6LLPQDr0xL8l`qRCok-$@5|74HZVIP$w^t zOFQX%o0f;mGcXPz0^!!e8!E-Szi~-WJP?bXsN+=^L85+S1;cAa6qe_|)C#7)N!xz- zl|wBwb}_AS)I>?$6b{Y@G9WxLQFZZJ>dLfwCBs0b)}=_}M+C+zCnYw&_+VitS*LK> z$&S7N5M`r}kH0?p+kr;jcCnMSH?*Vz+>Rrl@J_t$%AI4V(;^Pcmwl7H{rC(%gzzs> z)x{G?;TtRwv$|Qj*Zl%?yF>)=l=XR2E%nT)s3a>QKB7c}q!fzDV{k_U9firQn+I?< z?xg^TZaDc5_Z{H_buPS6S+_Hzl9;ZP_WuOUh)S{H;9Eb zP+Vjwu8IcQHD?eu~eO-W3RhiAYl7zh@~X95P07A zzHWl()R5}0gLeck)PTdVKWmgIjmNamn?ODpG(PGL!EX~68||kjb~Nwsq*O}mHYj7p zV#B{_&)%DcNRM<_J@Bo`pfSAVpE3J07FPx zcry+6N2LFn6Ch&REds{rf0goY;>QbfUCl{Rk<rZT$IX#|FQpiIG zdACmdkPFTxG4}nhj(;bSW)?8p8c4kS^)7t|z*j|fds9<+Qbc&{*56}eF+QV@YkrXx zB%yaaUhy0IQZ4t_*78hnAkh+-jbznT)GRSxhxZ)J8l+bPdrOxik?H;Q$+H;aIVgW- zQ(6vl2QFvpQ%&*%&ebJOGk`d19o%iU>|UM(C&shLSvOX*r>^Zi_fBkr_J4*D0{Rt? zC_q-!D*@?2oyzxT9-vZXxGLp#xKL-`L*Ot|ATMd8r#0>IRm)=FN0HrzNhQ_6E;6O* z?3{{NwS6OfmQ!|nC+o$=zvMy_=nsYJ#%>kKpJj$89P-cJb>cCJ+<@8c9QDzZ)5&1P-{&pEuG2g@#wldlg8~b2MLS6vh1)oHRwpP z0tD51&(bX#r845YXmKvbrGlT_Y}w-Zs8f!Eq8K$g;R&I;GTJ^I>gNh|YAr5_!UkpX z_1iryO{!W4Ny%n&nVp2mDn>d6od*+IZ!QPLE0M7}i$&87!#Y(i{)eYeh4bIW( zxfU7>rAyr0dLL1R)dkH znraEv10`Et@x)8Nv}(OctMOFDl38->eceAp7k+)~YV9Bsf9kWVp?>&|3p(qYROgBumhrKG}VlY)M7bLAL!_2ihW+Yi;+EHJ*# z*8Oue_9pUCA5w$A!r{FAoTTdwi!3=a|ABYyhl!k})4b&@=4pd$Z#~{2hFt&e7}%O5 zqLjy=O_0cjeMkF)0%7-NfC?5;otfni>&vi0 zA2BWLc5}0{zxb%x6@+mKNbvx9)1lEe<8u+ckr3heTAMnUBL#&!?q3wW`skoOC-dz_WrueMz z3{`^yT8oq`?=^=3h^z)yu;Rt#u+6{NpRF9y`vK%+Jaa`gFZXBdnu*kyFXL8cC_zWye%l|;Y&w)vKq*Zr3Fd8z57gBocWaoaI8V$5Le-@ zP8;tKRY zHNR%?>=VDD*11|K^{b&#_>N}W<*fIITQ%f^#*KPMvwK6O(E0y$Kj|G!Rjm@L2}7iI z6Qgnbf9)E~#8Bmq9ZG!UR+-V-!YNppQ;kkGs!h(-)rO;4O2cvolu%HvqSqz8zb^?~ zB2V#_Zvn~KTZ(R`ALg~2(464h_D3?k2b0-`gR8ZWozn0oxb( z0@45Yza%ct1l5n7!w<%qz|)paC;4`}6Ti}lhJBPP^*@)V70$`?K{<}07_BzQw&vy= zKU;y}7<6BMe{m5t9q-di@J~Q6tJ>&Tj)+Z9Y^2JV^KS6VN^0oN?Q86u0y+p_+-jLU z2Asx@i&!$uAf@%>^_C7NY+EPchJ)&m(0~MG9JR9c!F;V$QuolOo%c*z?@72ca?Ivy z#3ZO~@1j`6pY6mL<-vzlh}}+XOPeB6!UX=Bh7&H1wOD_qyUzNk6`E;AV2i(4 z1#&2-2Oa=aEWFwY1@fZCQ2p_AQmNNwuLw>&?$?Rlhxf;%G3unD2dp?fOR52KE}>$b zd;UG~RU=dKp4{}?sf*i_dIX0nAkAPWIl_EDNfKD+9xUk8#@xuosun2%4ne6jWNwlI1Tb1QR?``J z7a;p`uv=JK1E`H~wP_slmK@g+?*Qqteh1PQhy6yR=v+>`y77JwV-COreR+7H zU`{&YPG{0XtBuCr8o6KUM8A`&c}A5rXG5Ixfa+2Unfp8<)p_^FRGp1MdfE@S#|-8t zVh}p#TUR6jRCgdpQ~`*|hiO~xt>+E1L-z-mhO2<&&7CaH%k@SanRsCP+VF1$W=0># zDj4^*Gv?$CB!GPY0n@xxEjG8a?N}5OV1pCT_Q2qzx}N@ebm!9U40y{Yk(WRPc_c%2 zKdKjEP)AeA$mupCwGZnpRRhS}Q&}~7wg^n`%(IImf@!P61oO?7-C`~*IO+3ljSR|F zmY$6Udh%aJqfo4(e6-Z`5IiE%IRqr0OnW0p>;d$ItzH1gi=Oxwioez491GXusch$NNJ)63c?? 
ztR5R;)%|sd3))rir=55;`^2V;Db?l>^Tqm`JG{*ZEFEG$9TXV^CA*!muI+0g%T~xt z%9f_xg2JM#to9*xa0>2SHqkS|eO6u5$bYcc<^T&jA;g8^vyWkQ-PZt;o%tc&C)fFB zjGuw5{s2z*SX#d84#Cj@&@k`@!PEee)KUi$=fLHR4)RkX3g7N(O4)3s!5x3?vw#}e zmSLOuImZHMc-@>?pH|A9S3F_wq0EHP=^=XDP;{gz)$SN`4C>u?=6YIZ>D*6(cLlGP zOe+UjC)1sxcW(8KHA36ZGx^c-)3W_O+8EO$56@rBjjF^}*X;D?&JJ-qREjpc66-ai zKLpVMsAAZnixKw+%9{`I7_>Hf6e zT_g7#7iVv^LqXL{4>TA|Ti@%l1EgD2vf~gJc{swkQW!zCxS^x4z}{2@GHpERw(lkJ zzlKESx^e6k{0Sryi4KG(p7Vas*toD?rxm6@ChQ{~u&W`U-(lFfn1?8XPhPa$L9DF2T zGA{wK%{*{wH+lh!bbbSOgAeFic^&#P2m-=29#7+WQgu)r+6?y;HQX?G=3?H<-LsP& zVy~<{;*e4lAom^nbqV!KsXoiI8i0&QA$Ud480hq;X*#uWl+IPo6F@4>u;Zjx-<&uX zU*ev_f&jJfel2!OSfyrO%_X#amP+5r^D?@4sG5cYu@)KgQxUB}w0-jI65fFcjkxMu z(I!(NzqC6zF7;r=bGiDkxPBh?Y`IX}2C3rfyo{P*WK-n7U#MZ`feNYd(_2$E3ME=X zW83#$;d3PCsrWV=a?jX8s&?zAgcE{<&s%g)0VIi6n{BSLi5_~Th=(U0U8}=UjkpYb zIG2D0!4va(I1`8*H6K8xfVK0a;8@@ihCS0bm3#)vD&UgGa?h-zU=2A5-Y^UI}omMM|h!Xg35wsYi(jrTB=+R!aau z<`8tvvbzf%J%~U%hXr#>lOLfIH=HIT`!x$RF7I*USdPDRmf_N^qf}m)Wd}U%&sKHk zoi}%F$nGoP2t8wu=D8~%vb_%LBYHfB<*j&lR_ZS{;!r)mpfwQ-ai))hQb4B@@v6=D z-7C(u0J26 zo2eheUBVf2(VIz|+{QRH4Oax~Q%HgHc$Mna)()kPHbZ+g^3$kOr%(* zM~{Pq52vUN7GtT8Oc$o(X0Us66@`nBCxe&UG5-6zfr{76?PNA){=-T>Pk_E^Rahn( z7YkB^1h&2mcZ(M#*ZbHB$hO~$G5WtaQl8jmE+=A$de5Cy%CpQA)sK(1^&PeQ4DbWf zii*>hBwJ{{l);6)NtD=87i&kN zF;IUBr-ONgaph9aIyC4FICggwDzgkwtlZQVIpWQs%n1oNN~z?YOY7(E#ecq|Ad7v+ zI(aczk8o3FT$~gu91yk_WcD2e6aebD<)UAp{QZOX(LO^%$F0Q5oA}8#*}po&^$SKc{+T zQTZC_*0)Y|Rr|i1Ho60{pi&KG)XcaaZi!=pE{tj|mS5{>^p5Adj#SR?A`fr% z=bLRWD2uP*Hr9U4H65kZ-=c&YsYl#f#2@hU-$(QchzpL zD8oQ{$96zjJ{)f=^;>}+NGaljyfa(ita7`~SiMXXvqcowyrnfcAgaYfcy@;30Zf7I zHK@1_IBq@5Eo?4GT7dU|3YX9PB%#kVo&~CP=$c`eOO$a%J;!Op*!2!ahr9;j?_wAb zSj*eV3o&8V`G0|k58nmwb}F1(B(DENu23Pp>NLc*rSPt+&?*lvqpKf`8M?7t7K}Im zj$xxk?!Ky99TmE~-E|eaX7ueC%FetTwIOxZH;kBcbq6)`_rZ*C4`Do5(KX*qG;(eG z25cWbyp3-2v_DpDZAzYGxN+C5Z=S89lUo$0GlyaT=U6`}Iu(qWHdOEGchs9|(MfqM zISndIeRzz;p__%oBhqfTnjbfJep103b4nm+-a)PD_2qkXIQ6cvf@M?Mr%bGH4B}?I zf#e*4tEQ)-k}gLK5~S!Q&W!{CCG0x>UY;;!^SsJeT8-!26lE7(H355~CG+b37s=Cw}Du z_ykq1I5tx49UW)#xqYafu4s-xG%^+xX1cfL-U)?q!wh46Tr>(PWv?L~HywjEk4&XAf>;EMdj{dgxY{T5tO4n?h=E7TuD7zSoHqqb}h3i}oE z<~lLFyOJ&%2RsCCp{b1*O_1Kt)AxppkoDf2yzOgueLV4@_t5);J*&jlD-nXG=n-0C%2^(5@j#&9r2<;k$WEYAY z>6w_CILG3($)nj_fhS;Jb-c*C=&+PvTqw$;Y@qp*eMe!O;vH#hv1k35f}|-x@i_k*6sOWv<|Nt1d| zt)o@ZDOJ8uP^57^{jPd`JW-%Y8k%FPP=TGG$N+yHL!OeLuom{j3>Jj#C7xG_Oz6E| z&;0~;_%S%IZsiLh>!|x21L>cPZv9Lub9(n(s9EsywY;yU{4cl^Q7DJIQOX}1_Ft); zm6#S~S%n^b0@E^lRWFtqf#iyIt4u^AYik|);8;DssO(MZV}!NY@x1+o4aWrL^gGY7 z*DG@wWXyj(jQK2)n#}?Z1-(62vfQ!}(>{w2b9KM|OKm$G40&1dfgrvy;&YO6tgMI0g9K(ZJSXIc9*XVnDi7vrU@ev{>cKszz;M%fC5Ku=OOHuK0k^9 zflnOnG=bv+6UEgL6Ic0o*gJ4c(Fcwneo2(Al+nG6Wzw!74@yDxHtQNs-^MV!HuyH} zJfvlK@O8aERPh3!5l}Usys#i3=up> zYsL{oixJr7_s=h?o)HbXh-FngM5=y_^$)8B$FmlYfC86@=QgL_zgdBI?*M|Tq=U{W z@$cXCQ;+>*0w4b4j}IkWC2xc;Ht_rLf4v2T(-ZlRZUHBIy5YM0sXIyz|NhNW5xMz) z9UB3Q{t1u+i#tCXBmVPS!Ef$m9e3dCO#)@Dxj;uM{_a$*5SMlGlmq+H@!)J-NwNBS z?3i^oH`UqP#!tq7^k9G(E6yYJZZ`T_^#Y*b#2QWjUF_7VkqtIK^Si6!J-9Y-Tkqwx z=MPS(O#fQhlhoSBHfp=dCM2nOLxTFcvk9eaGbJv?=dH*s#Z@~EiIWo6Mb0``&jNO4 z%G%`#HF2kF8q(kVD{f)<6apxsffUX@Qul4PR6eIdr=97qa3BsP@*($NXC#^J;o3kY zFcS^Ky|B@j?_O7G2^Ce{IKZr4H5RtDs2dQ96G?hik=qPFuQln$=8|w~v(lHCt1afk z8xu8BO;lDl_5OXC@)Z#8Jh0ezoyD}QQ1ABwwo&yw`Siy~h7tL-r9hog^7+Ge+M%9h8otIrgl_ilZmHIYPnY`?1d_2ih_-rY?#C(*U7 zF+m-*mW+kW+TB$;XFwgV)@R4idH5~1<&kwp3e#7*KhNJMS2i=#-lNI=qz2*kU3kI+ zFvjEG{@f`v%0v5bYjo7gk}lATt#tNB+Dmi2vi#&N=XGnUH2W@68eNX7wSm{EI}wlU zmkMSdLD%G2t1a;kGpe6R8q4LlVGv#LtW{5Kt>t@u7qIhwlLuQX4xag*L37-hCf#_w zT!>&vclW5E`@p+a#3mW;*)K=M_(aPvW9l<=+TH?!$@}`f 
Date: Tue, 28 Jul 2015 17:10:21 -0700
Subject: [PATCH 707/956] Updated AWS SDK calls to match the 0.7.0 release of
 the AWS SDK

---
 builder/amazon/chroot/step_create_volume.go | 5 ++---
 builder/amazon/chroot/step_register_ami.go  | 4 ++--
 builder/amazon/common/access_config.go      | 4 ++--
 builder/amazon/common/artifact.go           | 2 +-
builder/amazon/common/block_device.go | 8 +++---- builder/amazon/common/block_device_test.go | 21 +++++++++---------- builder/amazon/common/step_ami_region_copy.go | 9 ++++---- builder/amazon/common/step_create_tags.go | 2 +- .../common/step_modify_ami_attributes.go | 2 +- .../amazon/common/step_run_source_instance.go | 12 +++++------ builder/amazon/common/step_security_group.go | 4 ++-- builder/amazon/ebs/step_create_ami.go | 2 +- builder/amazon/instance/step_register_ami.go | 2 +- 13 files changed, 38 insertions(+), 39 deletions(-) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 40925483a..9db99163a 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -5,7 +5,6 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -52,12 +51,12 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } createVolume := &ec2.CreateVolumeInput{ AvailabilityZone: instance.Placement.AvailabilityZone, - Size: aws.Long(vs), + Size: aws.Int64(vs), SnapshotID: rootDevice.EBS.SnapshotID, VolumeType: rootDevice.EBS.VolumeType, IOPS: rootDevice.EBS.IOPS, } - log.Printf("Create args: %s", awsutil.StringValue(createVolume)) + log.Printf("Create args: %s", createVolume) createVolumeResp, err := ec2conn.CreateVolume(createVolume) if err != nil { diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 5314ef0a1..8ed4df9b9 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -34,7 +34,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } if s.RootVolumeSize > *newDevice.EBS.VolumeSize { - newDevice.EBS.VolumeSize = aws.Long(s.RootVolumeSize) + newDevice.EBS.VolumeSize = aws.Int64(s.RootVolumeSize) } } @@ -64,7 +64,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set the AMI ID in the state ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) amis := make(map[string]string) - amis[ec2conn.Config.Region] = *registerResp.ImageID + amis[*ec2conn.Config.Region] = *registerResp.ImageID state.Put("amis", amis) // Wait for the image to become ready diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index 4479e0181..88bda0423 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -40,9 +40,9 @@ func (c *AccessConfig) Config() (*aws.Config, error) { } return &aws.Config{ - Region: region, + Region: aws.String(region), Credentials: creds, - MaxRetries: 11, + MaxRetries: aws.Int(11), }, nil } diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 7b2537072..aba2ffde4 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -70,7 +70,7 @@ func (a *Artifact) Destroy() error { regionConfig := &aws.Config{ Credentials: a.Conn.Config.Credentials, - Region: region, + Region: aws.String(region), } regionConn := ec2.New(regionConfig) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index fb14a66ae..f009cd7bc 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -32,20 +32,20 @@ func buildBlockDevices(b []BlockDevice) 
[]*ec2.BlockDeviceMapping { for _, blockDevice := range b { ebsBlockDevice := &ec2.EBSBlockDevice{ VolumeType: aws.String(blockDevice.VolumeType), - VolumeSize: aws.Long(blockDevice.VolumeSize), - DeleteOnTermination: aws.Boolean(blockDevice.DeleteOnTermination), + VolumeSize: aws.Int64(blockDevice.VolumeSize), + DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), } // IOPS is only valid for SSD Volumes if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { - ebsBlockDevice.IOPS = aws.Long(blockDevice.IOPS) + ebsBlockDevice.IOPS = aws.Int64(blockDevice.IOPS) } // You cannot specify Encrypted if you specify a Snapshot ID if blockDevice.SnapshotId != "" { ebsBlockDevice.SnapshotID = aws.String(blockDevice.SnapshotId) } else if blockDevice.Encrypted { - ebsBlockDevice.Encrypted = aws.Boolean(blockDevice.Encrypted) + ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted) } mapping := &ec2.BlockDeviceMapping{ diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index c69ef2efb..d76cf4d07 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/service/ec2" ) @@ -29,8 +28,8 @@ func TestBlockDevice(t *testing.T) { EBS: &ec2.EBSBlockDevice{ SnapshotID: aws.String("snap-1234"), VolumeType: aws.String("standard"), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(true), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(true), }, }, }, @@ -45,8 +44,8 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ VolumeType: aws.String(""), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(false), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(false), }, }, }, @@ -64,9 +63,9 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String(""), EBS: &ec2.EBSBlockDevice{ VolumeType: aws.String("io1"), - VolumeSize: aws.Long(8), - DeleteOnTermination: aws.Boolean(true), - IOPS: aws.Long(1000), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(true), + IOPS: aws.Int64(1000), }, }, }, @@ -93,13 +92,13 @@ func TestBlockDevice(t *testing.T) { got := blockDevices.BuildAMIDevices() if !reflect.DeepEqual(expected, got) { t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", - awsutil.StringValue(expected), awsutil.StringValue(got)) + expected, got) } if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) { t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", - awsutil.StringValue(expected), - awsutil.StringValue(blockDevices.BuildLaunchDevices())) + expected, + blockDevices.BuildLaunchDevices()) } } } diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 3f545284f..d19ffe5bd 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -5,6 +5,7 @@ import ( "sync" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" @@ -21,7 +22,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) amis := state.Get("amis").(map[string]string) - ami := amis[ec2conn.Config.Region] + ami := amis[*ec2conn.Config.Region] if len(s.Regions) == 0 { return 
multistep.ActionContinue @@ -33,7 +34,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { var wg sync.WaitGroup errs := new(packer.MultiError) for _, region := range s.Regions { - if region == ec2conn.Config.Region { + if region == *ec2conn.Config.Region { ui.Message(fmt.Sprintf( "Avoiding copying AMI to duplicate region %s", region)) continue @@ -44,7 +45,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction { go func(region string) { defer wg.Done() - id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, ec2conn.Config.Region) + id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region) lock.Lock() defer lock.Unlock() @@ -84,7 +85,7 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, if err != nil { return "", err } - awsConfig.Region = target + awsConfig.Region = aws.String(target) regionconn := ec2.New(awsConfig) resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 4750d7a08..220735bed 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -36,7 +36,7 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { regionconn := ec2.New(&aws.Config{ Credentials: ec2conn.Config.Credentials, - Region: region, + Region: aws.String(region), }) // Retrieve image list for given AMI diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index ff0352a1f..df6424245 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -90,7 +90,7 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami)) regionconn := ec2.New(&aws.Config{ Credentials: ec2conn.Config.Credentials, - Region: region, + Region: aws.String(region), }) for name, input := range options { ui.Message(fmt.Sprintf("Modifying: %s", name)) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index b94a6031c..6482b8084 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -141,8 +141,8 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ImageID: &s.SourceAMI, InstanceType: &s.InstanceType, UserData: &userData, - MaxCount: aws.Long(1), - MinCount: aws.Long(1), + MaxCount: aws.Int64(1), + MinCount: aws.Int64(1), IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone}, @@ -151,11 +151,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi if s.SubnetId != "" && s.AssociatePublicIpAddress { runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ &ec2.InstanceNetworkInterfaceSpecification{ - DeviceIndex: aws.Long(0), + DeviceIndex: aws.Int64(0), AssociatePublicIPAddress: &s.AssociatePublicIpAddress, SubnetID: &s.SubnetId, Groups: securityGroupIds, - DeleteOnTermination: aws.Boolean(true), + DeleteOnTermination: aws.Bool(true), }, } } else { @@ -185,11 +185,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi 
 		IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile},
 		NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
 			&ec2.InstanceNetworkInterfaceSpecification{
-				DeviceIndex:              aws.Long(0),
+				DeviceIndex:              aws.Int64(0),
 				AssociatePublicIPAddress: &s.AssociatePublicIpAddress,
 				SubnetID:                 &s.SubnetId,
 				Groups:                   securityGroupIds,
-				DeleteOnTermination:      aws.Boolean(true),
+				DeleteOnTermination:      aws.Bool(true),
 			},
 		},
 		Placement: &ec2.SpotPlacement{
diff --git a/builder/amazon/common/step_security_group.go b/builder/amazon/common/step_security_group.go
index b65ebb408..e43e866a3 100644
--- a/builder/amazon/common/step_security_group.go
+++ b/builder/amazon/common/step_security_group.go
@@ -59,8 +59,8 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
 	req := &ec2.AuthorizeSecurityGroupIngressInput{
 		GroupID:    groupResp.GroupID,
 		IPProtocol: aws.String("tcp"),
-		FromPort:   aws.Long(int64(port)),
-		ToPort:     aws.Long(int64(port)),
+		FromPort:   aws.Int64(int64(port)),
+		ToPort:     aws.Int64(int64(port)),
 		CIDRIP:     aws.String("0.0.0.0/0"),
 	}
diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go
index dff7d88b0..a3980e3ee 100644
--- a/builder/amazon/ebs/step_create_ami.go
+++ b/builder/amazon/ebs/step_create_ami.go
@@ -38,7 +38,7 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
 	// Set the AMI ID in the state
 	ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageID))
 	amis := make(map[string]string)
-	amis[ec2conn.Config.Region] = *createResp.ImageID
+	amis[*ec2conn.Config.Region] = *createResp.ImageID
 	state.Put("amis", amis)

 	// Wait for the image to become ready
diff --git a/builder/amazon/instance/step_register_ami.go b/builder/amazon/instance/step_register_ami.go
index f97c5df0e..dc76331f8 100644
--- a/builder/amazon/instance/step_register_ami.go
+++ b/builder/amazon/instance/step_register_ami.go
@@ -44,7 +44,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
 	// Set the AMI ID in the state
 	ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID))
 	amis := make(map[string]string)
-	amis[ec2conn.Config.Region] = *registerResp.ImageID
+	amis[*ec2conn.Config.Region] = *registerResp.ImageID
 	state.Put("amis", amis)

 	// Wait for the image to become ready

From 585638d06384dacffd9bd1163460430661764299 Mon Sep 17 00:00:00 2001
From: Bob Kuo
Date: Tue, 28 Jul 2015 17:21:37 -0500
Subject: [PATCH 708/956] Do not require exclusive VNC access while building

An additional client can be connected during build time for inspection.
We can manually connect and set our VNC clients to ignore all input, or
we can connect with vnc2flv to record the build session for later
verification.
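To make the change concrete: the handshake both builders perform reduces to
a single flag on the go-vnc client config. Below is a minimal sketch of the
non-exclusive connection; the endpoint 127.0.0.1:5900 and the standalone
program structure are illustrative assumptions, not part of this patch.

```go
package main

import (
	"log"
	"net"

	"github.com/mitchellh/go-vnc"
)

func main() {
	// Dial the port the hypervisor exposes for VNC; the address here is a
	// stand-in for whatever the builder actually discovered.
	nc, err := net.Dial("tcp", "127.0.0.1:5900")
	if err != nil {
		log.Fatalf("Error connecting to VNC: %s", err)
	}
	defer nc.Close()

	// Exclusive: false requests a shared session, so the server keeps other
	// viewers (a human, or vnc2flv) attached while the boot command is typed.
	c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false})
	if err != nil {
		log.Fatalf("Error handshaking with VNC: %s", err)
	}
	defer c.Close()
}
```

With a shared session, a second client on the same port simply watches the
boot command keystrokes stream by instead of being disconnected.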
--- builder/qemu/step_type_boot_command.go | 2 +- builder/vmware/common/step_type_boot_command.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index e42903f55..b97241b0b 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -52,7 +52,7 @@ func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction } defer nc.Close() - c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: true}) + c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false}) if err != nil { err := fmt.Errorf("Error handshaking with VNC: %s", err) state.Put("error", err) diff --git a/builder/vmware/common/step_type_boot_command.go b/builder/vmware/common/step_type_boot_command.go index b23ede1da..3959e5517 100644 --- a/builder/vmware/common/step_type_boot_command.go +++ b/builder/vmware/common/step_type_boot_command.go @@ -57,7 +57,7 @@ func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction } defer nc.Close() - c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: true}) + c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false}) if err != nil { err := fmt.Errorf("Error handshaking with VNC: %s", err) state.Put("error", err) From 9030c1fa3ad6d357ccccca288e7d7e81c42edbd7 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Thu, 30 Jul 2015 10:54:28 -0700 Subject: [PATCH 709/956] Fix funny characters. --- website/source/community/index.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/community/index.html.markdown b/website/source/community/index.html.markdown index 3951e909f..31facf753 100644 --- a/website/source/community/index.html.markdown +++ b/website/source/community/index.html.markdown @@ -11,12 +11,12 @@ page_title: Community Packer is a new project with a growing community. Despite this, there are dedicated users willing to help through various mediums. -**IRC:** `#packer-tool` on Freenode. +**IRC:** `#packer-tool` on Freenode. -**Mailing List:** [Packer Google +**Mailing List:** [Packer Google Group](http://groups.google.com/group/packer-tool) -**Bug Tracker:** [Issue tracker on +**Bug Tracker:** [Issue tracker on GitHub](https://github.com/mitchellh/packer/issues). Please only use this for reporting bugs. Do not ask for general help here. Use IRC or the mailing list for that. From 66b7b9a0b7fd31bd56c4b24abf2266052f6f0fb1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 15:58:12 -0700 Subject: [PATCH 710/956] Added a section to explain more clearly how to reference external resources like boot ISOs --- .../source/docs/command-line/push.html.markdown | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index 140c996d3..bb35a9d40 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -77,17 +77,11 @@ build artifacts larger than 5gb, and Atlas *can* store artifacts larger than images), you will need to put your boot ISO in an external web service and download it during the packer run. 
-The easiest way to host these in a secure fashion is to upload your ISO to -[Amazon -S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html) -or [Google Cloud -Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl) and -download it using a signed URL. You can inject the signed URL into your build by -using a build variable (environment variable) in Atlas. Example: +## Building Private `.iso` and `.dmg` Files + +If you want to build a private `.iso` file you can upload the `.iso` to a secure file hosting service like [Amazon S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), [Google Cloud Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or [Azure File Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and download it at build time using a signed URL. You should convert `.dmg` files to `.iso` and follow a similar procedure. + +Once you have added [variables in your packer template](/docs/templates/user-variables.html) you can specify credentials or signed URLs using Atlas environment variables, or via the `-var` flag when you run `push`. ![Configure your signed URL in the Atlas build variables menu](/assets/images/packer-signed-urls.png) - -You will also need to [configure your packer -template](http://stormchaser.local:4567/docs/templates/user-variables.html) to -use the variable injected by Atlas (or via `push -var`). From 5218c5a65b674c947e104fcdd4dc5b43dd41830e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 16:09:40 -0700 Subject: [PATCH 711/956] Reformat --- .../source/docs/command-line/push.html.markdown | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/website/source/docs/command-line/push.html.markdown b/website/source/docs/command-line/push.html.markdown index bb35a9d40..57ea58cf0 100644 --- a/website/source/docs/command-line/push.html.markdown +++ b/website/source/docs/command-line/push.html.markdown @@ -79,9 +79,20 @@ download it during the packer run. ## Building Private `.iso` and `.dmg` Files -If you want to build a private `.iso` file you can upload the `.iso` to a secure file hosting service like [Amazon S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), [Google Cloud Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or [Azure File Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and download it at build time using a signed URL. You should convert `.dmg` files to `.iso` and follow a similar procedure. +If you want to build a private `.iso` file you can upload the `.iso` to a secure +file hosting service like [Amazon +S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html), +[Google Cloud +Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or +[Azure File +Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and +download it at build time using a signed URL. You should convert `.dmg` files to +`.iso` and follow a similar procedure. -Once you have added [variables in your packer template](/docs/templates/user-variables.html) you can specify credentials or signed URLs using Atlas environment variables, or via the `-var` flag when you run `push`. +Once you have added [variables in your packer +template](/docs/templates/user-variables.html) you can specify credentials or +signed URLs using Atlas environment variables, or via the `-var` flag when you +run `push`. 
![Configure your signed URL in the Atlas build variables menu](/assets/images/packer-signed-urls.png) From 87bcfc3ef7b68ffa2d62c4d0b6e026c337107f7a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 16:15:55 -0700 Subject: [PATCH 712/956] Ignore internal packages for go 1.5; thanks @dlsniper --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 884d6bbf2..2fb386399 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,7 @@ updatedeps: go list ./... \ | xargs go list -f '{{join .Deps "\n"}}' \ | grep -v github.com/mitchellh/packer \ + | grep -v '/internal/' \ | sort -u \ | xargs go get -f -u -v From 32b714e0853072a61772dfff868f13d9eeec38e4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 19:19:48 -0700 Subject: [PATCH 713/956] Update code.google.com/gosshold/ssh to point to golang.org/x/crypto/ssh, since this has been moved into core now Fixes #2515 --- builder/digitalocean/step_create_ssh_key.go | 2 +- builder/googlecompute/step_create_ssh_key.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/builder/digitalocean/step_create_ssh_key.go b/builder/digitalocean/step_create_ssh_key.go index a99fd930d..ce65cb425 100644 --- a/builder/digitalocean/step_create_ssh_key.go +++ b/builder/digitalocean/step_create_ssh_key.go @@ -10,11 +10,11 @@ import ( "os" "runtime" - "code.google.com/p/gosshold/ssh" "github.com/digitalocean/godo" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/packer" + "golang.org/x/crypto/ssh" ) type stepCreateSSHKey struct { diff --git a/builder/googlecompute/step_create_ssh_key.go b/builder/googlecompute/step_create_ssh_key.go index bbf048ee7..521e6c3d6 100644 --- a/builder/googlecompute/step_create_ssh_key.go +++ b/builder/googlecompute/step_create_ssh_key.go @@ -1,15 +1,16 @@ package googlecompute import ( - "code.google.com/p/gosshold/ssh" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/pem" "fmt" + "os" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "os" + "golang.org/x/crypto/ssh" ) // StepCreateSSHKey represents a Packer build step that generates SSH key pairs. From 015742b5470ed0c399d2e9fbeec5ed270dfc692a Mon Sep 17 00:00:00 2001 From: Gabriel Sobrinho Date: Fri, 31 Jul 2015 00:01:00 -0300 Subject: [PATCH 714/956] Fix last example syntax --- .../getting-started/remote-builds.html.markdown | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index f37a5a5ad..6ddb4ece3 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -90,7 +90,19 @@ it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). 
The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple: -`javascript { "variables": ["..."], "builders": ["..."], "provisioners": ["..."], "push": ["..."], "post-processors": [ { "type": "atlas", "artifact": "ATLAS_USERNAME/packer-tutorial", "artifact_type": "amazon.ami" } ] }` +``` {.javascript} +{ + "variables": ["..."], + "builders": ["..."], + "provisioners": ["..."], + "push": ["..."], + "post-processors": [{ + "type": "atlas", + "artifact": "ATLAS_USERNAME/packer-tutorial", + "artifact_type": "amazon.ami" + }] +} +``` Update the `post-processors` block with your Atlas username, then `packer push example.json` and watch the build kick off in Atlas! When the build From 3c517a65c3c0b1f995f12ab9c96ad30c1527fb8f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 30 Jul 2015 21:16:11 -0700 Subject: [PATCH 715/956] Autoreformat --- website/source/docs/builders/openstack.html.markdown | 4 ++-- .../source/docs/provisioners/salt-masterless.html.markdown | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown index 56db25474..12a1ca882 100644 --- a/website/source/docs/builders/openstack.html.markdown +++ b/website/source/docs/builders/openstack.html.markdown @@ -100,8 +100,8 @@ builder. Rackconnect to assign the machine an IP address before connecting via SSH. Defaults to false. -- `metadata` (object of key/value strings) - Glance metadata that will be applied - to the image. +- `metadata` (object of key/value strings) - Glance metadata that will be + applied to the image. ## Basic Example: Rackspace public cloud diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index 1eeabaf14..adb1c4bb3 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -54,8 +54,8 @@ Optional: tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree). This will be uploaded to the `remote_state_tree` on the remote. -- `minion_config` (string) - The path to your local [minion - config file](http://docs.saltstack.com/ref/configuration/minion.html). This will be +- `minion_config` (string) - The path to your local [minion config + file](http://docs.saltstack.com/ref/configuration/minion.html). This will be uploaded to the `/etc/salt` on the remote. - `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt From 98b9d22b68053f751e7ffedbdc85499537772a67 Mon Sep 17 00:00:00 2001 From: Florian Noeding Date: Fri, 31 Jul 2015 15:34:25 +0200 Subject: [PATCH 716/956] amazon builder: only fetch password for winrm --- builder/amazon/common/step_get_password.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index 08a9c7b66..fec33891f 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -26,11 +26,10 @@ type StepGetPassword struct { func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) - image := state.Get("source_image").(*ec2.Image) - // Skip if we're not Windows... 
- if image.Platform == nil || *image.Platform != "windows" { - log.Printf("[INFO] Not Windows, skipping get password...") + // Skip if we're not using winrm + if s.Comm.Type != "winrm" { + log.Printf("[INFO] Not using winrm communicator, skipping get password...") return multistep.ActionContinue } From 263641c53799c20c7f9b23bcdd5097c97ca22194 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 1 Aug 2015 15:09:59 -0700 Subject: [PATCH 717/956] Fix case for ethernet.generatedAddress property lookup in VMX --- builder/vmware/common/ssh.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 86e184bb5..9db075a71 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -39,7 +39,7 @@ func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { var ok bool macAddress := "" if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { - if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { + if macAddress, ok = vmxData["ethernet0.generatedAddress"]; !ok || macAddress == "" { return "", errors.New("couldn't find MAC address in VMX") } } From e0d46685ea6f0ff545aa561eac858ae7451eaa40 Mon Sep 17 00:00:00 2001 From: Dane Elwell Date: Mon, 3 Aug 2015 17:53:33 +0100 Subject: [PATCH 718/956] Document remote_port option --- website/source/docs/builders/vmware-iso.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 8ac3a9fd3..35c1fb05b 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -369,6 +369,8 @@ fill in the required `remote_*` configurations: Additionally, there are some optional configurations that you'll likely have to modify as well: +* `remote_port` - The SSH port of the remote machine + * `remote_datastore` - The path to the datastore where the VM will be stored on the ESXi machine. From db1a781b6ef493754fc17e62eca2724e2a7efc48 Mon Sep 17 00:00:00 2001 From: Tyler Tidman Date: Mon, 3 Aug 2015 13:02:01 -0400 Subject: [PATCH 719/956] Rename .html.md files to .html.markdown Fixes #2546. Make files under website/source/docs/provisioners conform to standards for rest of docs. 
--- .../{shell-local.html.md => shell-local.html.markdown} | 0 .../{windows-restart.html.md => windows-restart.html.markdown} | 0 .../{windows-shell.html.md => windows-shell.html.markdown} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename website/source/docs/provisioners/{shell-local.html.md => shell-local.html.markdown} (100%) rename website/source/docs/provisioners/{windows-restart.html.md => windows-restart.html.markdown} (100%) rename website/source/docs/provisioners/{windows-shell.html.md => windows-shell.html.markdown} (100%) diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.markdown similarity index 100% rename from website/source/docs/provisioners/shell-local.html.md rename to website/source/docs/provisioners/shell-local.html.markdown diff --git a/website/source/docs/provisioners/windows-restart.html.md b/website/source/docs/provisioners/windows-restart.html.markdown similarity index 100% rename from website/source/docs/provisioners/windows-restart.html.md rename to website/source/docs/provisioners/windows-restart.html.markdown diff --git a/website/source/docs/provisioners/windows-shell.html.md b/website/source/docs/provisioners/windows-shell.html.markdown similarity index 100% rename from website/source/docs/provisioners/windows-shell.html.md rename to website/source/docs/provisioners/windows-shell.html.markdown From e73ec1f70daf0284cca4c189633be8e37b8c2981 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 11:16:17 -0700 Subject: [PATCH 720/956] Use go vet instead of go tool vet, and actually run it with make --- Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 2fb386399..8ff6560a5 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,6 @@ TEST?=./... -VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \ - -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -default: test +default: test vet dev bin: @sh -c "$(CURDIR)/scripts/build.sh" @@ -41,10 +39,10 @@ updatedeps: | xargs go get -f -u -v vet: - @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ + @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ go get golang.org/x/tools/cmd/vet; \ fi - @go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \ + @go vet ./... ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for reviewal."; \ From 07eff4c014d97ce2f0f108dadd18b2c411bf33bf Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 11:32:54 -0700 Subject: [PATCH 721/956] Reformat docs --- .../provisioners/shell-local.html.markdown | 37 ++++---- .../windows-restart.html.markdown | 33 +++---- .../provisioners/windows-shell.html.markdown | 86 +++++++++---------- 3 files changed, 79 insertions(+), 77 deletions(-) diff --git a/website/source/docs/provisioners/shell-local.html.markdown b/website/source/docs/provisioners/shell-local.html.markdown index b986cd5ef..198e31272 100644 --- a/website/source/docs/provisioners/shell-local.html.markdown +++ b/website/source/docs/provisioners/shell-local.html.markdown @@ -1,23 +1,25 @@ --- -layout: "docs" -page_title: "Local Shell Provisioner" -description: |- - The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. 
---- +description: | + The shell Packer provisioner provisions machines built by Packer using shell + scripts. Shell provisioning is the easiest way to get software installed and + configured on a machine. +layout: docs +page_title: Local Shell Provisioner +... # Local Shell Provisioner Type: `shell-local` -The local shell provisioner executes a local shell script on the machine -running Packer. The [remote shell](/docs/provisioners/shell.html) -provisioner executes shell scripts on a remote machine. +The local shell provisioner executes a local shell script on the machine running +Packer. The [remote shell](/docs/provisioners/shell.html) provisioner executes +shell scripts on a remote machine. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "shell-local", "command": "echo foo" @@ -31,15 +33,14 @@ required element is "command". Required: -* `command` (string) - The command to execute. This will be executed - within the context of a shell as specified by `execute_command`. +- `command` (string) - The command to execute. This will be executed within + the context of a shell as specified by `execute_command`. Optional parameters: -* `execute_command` (array of strings) - The command to use to execute the script. - By default this is `["/bin/sh", "-c", "{{.Command}"]`. The value is an array - of arguments executed directly by the OS. - The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). - The only available variable is `Command` which is the command to execute. - +- `execute_command` (array of strings) - The command to use to execute + the script. By default this is `["/bin/sh", "-c", "{{.Command}"]`. The value + is an array of arguments executed directly by the OS. The value of this is + treated as [configuration + template](/docs/templates/configuration-templates.html). The only available + variable is `Command` which is the command to execute. diff --git a/website/source/docs/provisioners/windows-restart.html.markdown b/website/source/docs/provisioners/windows-restart.html.markdown index a1b65cae1..05377ca23 100644 --- a/website/source/docs/provisioners/windows-restart.html.markdown +++ b/website/source/docs/provisioners/windows-restart.html.markdown @@ -1,16 +1,17 @@ --- -layout: "docs" -page_title: "Windows Restart Provisioner" -description: |- - The Windows restart provisioner restarts a Windows machine and waits for it to come back up. ---- +description: | + The Windows restart provisioner restarts a Windows machine and waits for it to + come back up. +layout: docs +page_title: Windows Restart Provisioner +... # Windows Restart Provisioner Type: `windows-restart` -The Windows restart provisioner initiates a reboot on a Windows machine -and waits for the machine to come back online. +The Windows restart provisioner initiates a reboot on a Windows machine and +waits for the machine to come back online. The Windows provisioning process often requires multiple reboots, and this provisioner helps to ease that process. @@ -19,7 +20,7 @@ provisioner helps to ease that process. The example below is fully functional. -```javascript +``` {.javascript} { "type": "windows-restart" } @@ -31,13 +32,13 @@ The reference of available configuration options is listed below. Optional parameters: -* `restart_command` (string) - The command to execute to initiate the - restart. By default this is `shutdown /r /c "packer restart" /t 5 && net stop winrm`. 
- A key action of this is to stop WinRM so that Packer can detect it - is rebooting. +- `restart_command` (string) - The command to execute to initiate the restart. + By default this is `shutdown /r /c "packer restart" /t 5 && net stop winrm`. + A key action of this is to stop WinRM so that Packer can detect it + is rebooting. -* `restart_check_command` (string) - A command to execute to check if the - restart succeeded. This will be done in a loop. +- `restart_check_command` (string) - A command to execute to check if the + restart succeeded. This will be done in a loop. -* `restart_timeout` (string) - The timeout to wait for the restart. - By default this is 5 minutes. Example value: "5m" +- `restart_timeout` (string) - The timeout to wait for the restart. By default + this is 5 minutes. Example value: "5m" diff --git a/website/source/docs/provisioners/windows-shell.html.markdown b/website/source/docs/provisioners/windows-shell.html.markdown index c758a5ebd..38f10fcef 100644 --- a/website/source/docs/provisioners/windows-shell.html.markdown +++ b/website/source/docs/provisioners/windows-shell.html.markdown @@ -1,22 +1,23 @@ --- -layout: "docs" -page_title: "Windows Shell Provisioner" -description: |- - The windows-shell Packer provisioner runs commands on Windows using the cmd shell. ---- +description: | + The windows-shell Packer provisioner runs commands on Windows using the cmd + shell. +layout: docs +page_title: Windows Shell Provisioner +... # Windows Shell Provisioner Type: `windows-shell` -The windows-shell Packer provisioner runs commands on a Windows machine -using `cmd`. It assumes it is running over WinRM. +The windows-shell Packer provisioner runs commands on a Windows machine using +`cmd`. It assumes it is running over WinRM. ## Basic Example The example below is fully functional. -```javascript +``` {.javascript} { "type": "windows-shell", "inline": ["dir c:\\"] @@ -28,48 +29,47 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is either "inline" or "script". Every other option is optional. -Exactly _one_ of the following is required: +Exactly *one* of the following is required: -* `inline` (array of strings) - This is an array of commands to execute. - The commands are concatenated by newlines and turned into a single file, - so they are all executed within the same context. This allows you to - change directories in one command and use something in the directory in - the next and so on. Inline scripts are the easiest way to pull off simple - tasks within the machine. +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next + and so on. Inline scripts are the easiest way to pull off simple tasks + within the machine. -* `script` (string) - The path to a script to upload and execute in the machine. - This path can be absolute or relative. If it is relative, it is relative - to the working directory when Packer is executed. +- `script` (string) - The path to a script to upload and execute in + the machine. This path can be absolute or relative. If it is relative, it is + relative to the working directory when Packer is executed. -* `scripts` (array of strings) - An array of scripts to execute. 
The scripts - will be uploaded and executed in the order specified. Each script is executed - in isolation, so state such as variables from one script won't carry on to - the next. +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be uploaded and executed in the order specified. Each script is + executed in isolation, so state such as variables from one script won't + carry on to the next. Optional parameters: -* `binary` (boolean) - If true, specifies that the script(s) are binary - files, and Packer should therefore not convert Windows line endings to - Unix line endings (if there are any). By default this is false. +- `binary` (boolean) - If true, specifies that the script(s) are binary files, + and Packer should therefore not convert Windows line endings to Unix line + endings (if there are any). By default this is false. -* `environment_vars` (array of strings) - An array of key/value pairs - to inject prior to the execute_command. The format should be - `key=value`. Packer injects some environmental variables by default - into the environment, as well, which are covered in the section below. +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the execute\_command. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. -* `execute_command` (string) - The command to use to execute the script. - By default this is `{{ .Vars }}"{{ .Path }}"`. The value of this is - treated as [configuration template](/docs/templates/configuration-templates.html). - There are two available variables: `Path`, which is - the path to the script to run, and `Vars`, which is the list of - `environment_vars`, if configured. +- `execute_command` (string) - The command to use to execute the script. By + default this is `{{ .Vars }}"{{ .Path }}"`. The value of this is treated as + [configuration template](/docs/templates/configuration-templates.html). + There are two available variables: `Path`, which is the path to the script + to run, and `Vars`, which is the list of `environment_vars`, if configured. -* `remote_path` (string) - The path where the script will be uploaded to - in the machine. This defaults to "/tmp/script.sh". This value must be - a writable location and any parent directories must already exist. +- `remote_path` (string) - The path where the script will be uploaded to in + the machine. This defaults to "/tmp/script.sh". This value must be a + writable location and any parent directories must already exist. -* `start_retry_timeout` (string) - The amount of time to attempt to - _start_ the remote process. By default this is "5m" or 5 minutes. This - setting exists in order to deal with times when SSH may restart, such as - a system reboot. Set this to a higher value if reboots take a longer - amount of time. +- `start_retry_timeout` (string) - The amount of time to attempt to *start* + the remote process. By default this is "5m" or 5 minutes. This setting + exists in order to deal with times when SSH may restart, such as a + system reboot. Set this to a higher value if reboots take a longer amount + of time. 
From 8d6719e71fe5fb6b9a31c031c2e5b5849c8b8030 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 3 Aug 2015 16:34:24 -0700 Subject: [PATCH 722/956] Add failing test for compress interpolation --- .../compress/post-processor_test.go | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index db23cf3b1..fec3b7a72 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -150,6 +150,35 @@ func TestCompressOptions(t *testing.T) { } } +func TestCompressInterpolation(t *testing.T) { + const config = ` + { + "post-processors": [ + { + "type": "compress", + "output": "{{ .BuildName }}.gz" + } + ] + } + ` + + artifact := testArchive(t, config) + defer artifact.Destroy() + + filename := "file.gz" + archive, err := os.Open(filename) + if err != nil { + t.Fatalf("Unable to read %s: %s", filename, err) + } + + gzipReader, _ := gzip.NewReader(archive) + data, _ := ioutil.ReadAll(gzipReader) + + if string(data) != expectedFileContents { + t.Errorf("Expected:\n%s\nFound:\n%s\n", expectedFileContents, data) + } +} + // Test Helpers func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { From 4ef3baa3eedfc171cd3d66ab7542030693084d24 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:30:57 -0700 Subject: [PATCH 723/956] Update test to include some interpolation configs --- post-processor/compress/post-processor_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index fec3b7a72..ea1d973eb 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -156,7 +156,7 @@ func TestCompressInterpolation(t *testing.T) { "post-processors": [ { "type": "compress", - "output": "{{ .BuildName }}.gz" + "output": "{{ build_name}}-{{ .BuildName }}-{{.BuilderType}}.gz" } ] } @@ -165,7 +165,9 @@ func TestCompressInterpolation(t *testing.T) { artifact := testArchive(t, config) defer artifact.Destroy() - filename := "file.gz" + // You can interpolate using the .BuildName variable or build_name global + // function. We'll check both. + filename := "chocolate-vanilla-file.gz" archive, err := os.Open(filename) if err != nil { t.Fatalf("Unable to read %s: %s", filename, err) @@ -230,6 +232,13 @@ func testArchive(t *testing.T, config string) packer.Artifact { compressor := PostProcessor{} compressor.Configure(tpl.PostProcessors[0][0].Config) + + // I get the feeling these should be automatically available somewhere, but + // some of the post-processors construct this manually. 
+ compressor.config.ctx.BuildName = "chocolate" + compressor.config.PackerBuildName = "vanilla" + compressor.config.PackerBuilderType = "file" + artifactOut, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to compress artifact: %s", err) From 8f2a9de28e24d76aef02a9863fcf30d7e8623b25 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:46:14 -0700 Subject: [PATCH 724/956] Updated documentation explaining how to use variables in compress post-processor filenames --- .../post-processors/compress.html.markdown | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index ad78a9315..9236dd0e7 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -15,10 +15,11 @@ VMware or VirtualBox) and compresses the artifact into a single archive. ## Configuration -### Required: +### Optional: -You must specify the output filename. The archive format is derived from the -filename. +By default, packer will build archives in `.tar.gz` format with the following +filename: `packer_{{.BuildName}}_{{.BuilderType}}`. If you want to change this +you will need to specify the `output` option. - `output` (string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a @@ -26,13 +27,9 @@ filename. detected packer defaults to `.tar.gz` behavior but will not change the filename. -If you are executing multiple builders in parallel you should make sure `output` -is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`. - -### Optional: - -If you want more control over how the archive is created you can specify the -following settings: + You can use `{{.BuildName}}` and ``{{.BuilderType}}` in your output path. + If you are executing multiple builders in parallel you should make sure + `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. - `compression_level` (integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher @@ -61,14 +58,14 @@ configuration: ``` {.json} { "type": "compress", - "output": "archive.zip" + "output": "{{.BuildName}}_bundle.zip" } ``` ``` {.json} { "type": "compress", - "output": "archive.gz", + "output": "log_{{.BuildName}}.gz", "compression": 9 } ``` From fbb24d4acfa7746a416d48a0556043807a2130e5 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 19:49:41 -0700 Subject: [PATCH 725/956] Changed interpolation logic so .BuildName can be used in the output config option --- post-processor/compress/post-processor.go | 44 +++++++++++------------ 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index bb6ce27bf..b95b27bde 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -55,9 +55,12 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { Interpolate: true, InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{}, + Exclude: []string{"output"}, }, }, raws...) 
+ if err != nil { + return err + } errs := new(packer.MultiError) @@ -67,16 +70,7 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } if p.config.OutputPath == "" { - p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" - } - - if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing target template: %s", err)) - } - - templates := map[string]*string{ - "output": &p.config.OutputPath, + p.config.OutputPath = "packer_{{.BuildName}}_{{.BuilderType}}" } if p.config.CompressionLevel > pgzip.BestCompression { @@ -89,17 +83,9 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.CompressionLevel = pgzip.DefaultCompression } - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = interpolate.Render(p.config.OutputPath, &p.config.ctx) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } + if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error parsing target template: %s", err)) } p.config.detectFromFilename() @@ -113,7 +99,19 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - target := p.config.OutputPath + // These are extra variables that will be made available for interpolation. + p.config.ctx.Data = map[string]string{ + "BuildName": p.config.PackerBuildName, + "BuilderType": p.config.PackerBuilderType, + } + + target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx) + if err != nil { + return nil, false, fmt.Errorf("Error interpolating output value: %s", err) + } else { + fmt.Println(target) + } + keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} From 1c956ff406c8d03c88cd6f4af90b5e09c5256463 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 4 Aug 2015 20:11:53 -0700 Subject: [PATCH 726/956] Removed errant backtick --- website/source/docs/post-processors/compress.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 9236dd0e7..3834ffc72 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -27,7 +27,7 @@ you will need to specify the `output` option. detected packer defaults to `.tar.gz` behavior but will not change the filename. - You can use `{{.BuildName}}` and ``{{.BuilderType}}` in your output path. + You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. If you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. 
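Stepping back, patches 722 through 726 all serve one change: the compress
post-processor now excludes `output` from up-front interpolation and renders it
at PostProcess time, when `BuildName` and `BuilderType` are actually known. A
minimal sketch of that render step against the same `interpolate` package the
diffs above call into; the data values are invented for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/packer/template/interpolate"
)

func main() {
	// Per-build data supplied just before rendering, mirroring what
	// PostProcess now does with its ctx.Data map.
	ctx := interpolate.Context{
		Data: map[string]string{
			"BuildName":   "chocolate",
			"BuilderType": "file",
		},
	}

	target, err := interpolate.Render("packer_{{.BuildName}}_{{.BuilderType}}.tar.gz", &ctx)
	if err != nil {
		log.Fatalf("Error interpolating output value: %s", err)
	}
	fmt.Println(target) // packer_chocolate_file.tar.gz
}
```

Deferring the render is what lets the same template produce a distinct archive
name for each parallel build.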
From e7ab9fb3c0ad801e2e9bd38f8230ba543f8c4554 Mon Sep 17 00:00:00 2001 From: Bryce Fisher-Fleig Date: Wed, 5 Aug 2015 15:28:20 -0700 Subject: [PATCH 727/956] Add missing option --- website/source/docs/builders/virtualbox-iso.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 7df4975dc..61e5d3e16 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -72,6 +72,9 @@ builder. - `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed. +- `ssh_password` (string) - The password to use to SSH into the machine once + the OS is installed. + ### Optional: - `boot_command` (array of strings) - This is an array of commands to type From abb67fdd7964385c23c9a57349bc07158f25798d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 5 Aug 2015 19:41:29 -0700 Subject: [PATCH 728/956] Fix govet issues --- builder/amazon/common/artifact.go | 2 +- builder/amazon/common/state.go | 2 -- builder/openstack/server.go | 2 -- packer/rpc/server.go | 8 ++++---- provisioner/windows-restart/provisioner.go | 1 - 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index aba2ffde4..4082b2abc 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -88,7 +88,7 @@ func (a *Artifact) Destroy() error { if len(errors) == 1 { return errors[0] } else { - return &packer.MultiError{errors} + return &packer.MultiError{Errors: errors} } } diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 075ce8ef7..3b40a48d1 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -181,8 +181,6 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) { time.Sleep(time.Duration(sleepSeconds) * time.Second) } - - return } func isTransientNetworkError(err error) bool { diff --git a/builder/openstack/server.go b/builder/openstack/server.go index 482657c03..0897821a8 100644 --- a/builder/openstack/server.go +++ b/builder/openstack/server.go @@ -92,6 +92,4 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) { log.Printf("Waiting for state to become: %s currently %s (%d%%)", conf.Target, currentState, currentProgress) time.Sleep(2 * time.Second) } - - return } diff --git a/packer/rpc/server.go b/packer/rpc/server.go index b6d17dacf..ceb77a8d3 100644 --- a/packer/rpc/server.go +++ b/packer/rpc/server.go @@ -1,13 +1,13 @@ package rpc import ( - "fmt" - "github.com/hashicorp/go-msgpack/codec" - "github.com/mitchellh/packer/packer" "io" "log" "net/rpc" "sync/atomic" + + "github.com/hashicorp/go-msgpack/codec" + "github.com/mitchellh/packer/packer" ) var endpointId uint64 @@ -149,7 +149,7 @@ func (s *Server) Serve() { func registerComponent(server *rpc.Server, name string, rcvr interface{}, id bool) string { endpoint := name if id { - fmt.Sprintf("%s.%d", endpoint, atomic.AddUint64(&endpointId, 1)) + log.Printf("%s.%d", endpoint, atomic.AddUint64(&endpointId, 1)) } server.RegisterName(endpoint, rcvr) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 4b6af609e..2e4b7c371 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -134,7 +134,6 @@ WaitLoop: case <-p.cancel: close(waitDone) return 
fmt.Errorf("Interrupt detected, quitting waiting for machine to restart") - break WaitLoop } } From 94c12c9afcedc128f0aa6eacd25575f684fad2c2 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Thu, 6 Aug 2015 09:27:38 -0700 Subject: [PATCH 729/956] Fix 'ephemeral' misspelling. --- website/source/docs/other/debugging.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index 8c8012bc8..efe01a0cf 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -20,9 +20,9 @@ usually will stop between each step, waiting for keyboard input before continuing. This will allow you to inspect state and so on. In debug mode once the remote instance is instantiated, Packer will emit to the -current directory an emphemeral private ssh key as a .pem file. Using that you +current directory an ephemeral private ssh key as a .pem file. Using that you can `ssh -i ` into the remote build instance and see what is going on -for debugging. The emphemeral key will be deleted at the end of the packer run +for debugging. The ephemeral key will be deleted at the end of the packer run during cleanup. ### Windows From f40ccd55adaa78897fb9daa8e9463c6fac37815a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 11:02:57 -0700 Subject: [PATCH 730/956] Added debug output to the makefile so I can see which commit travis is building --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 8ff6560a5..0ed426520 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,7 @@ generate: go generate ./... test: + @echo "Running tests on:"; git symbolic-ref HEAD; git rev-parse HEAD go test $(TEST) $(TESTARGS) -timeout=10s @$(MAKE) vet @@ -29,6 +30,7 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: + @echo "Updating deps on:"; git symbolic-ref HEAD; git rev-parse HEAD go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go list ./... \ @@ -37,8 +39,10 @@ updatedeps: | grep -v '/internal/' \ | sort -u \ | xargs go get -f -u -v + @echo "Finished updating deps, now on:"; git symbolic-ref HEAD; git rev-parse HEAD vet: + @echo "Running go vet on:"; git symbolic-ref HEAD; git rev-parse HEAD @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ go get golang.org/x/tools/cmd/vet; \ fi From af2fa705bf441699cc12accc25ef3801afc55cd9 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 12:24:13 -0700 Subject: [PATCH 731/956] Added go vet and git rev-parse head to appveyor so we can see what we're actually building / testing --- appveyor.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/appveyor.yml b/appveyor.yml index 202456f58..c5d317da6 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -31,6 +31,8 @@ install: build_script: - go test -v ./... + - go vet ./... 
+ - git rev-parse HEAD test: off From 211817c78e4b450b872d3095a778d580a48e5baa Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 16:52:30 -0700 Subject: [PATCH 732/956] Fix formatting for code block in docs --- .../docs/builders/virtualbox-ovf.html.markdown | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/website/source/docs/builders/virtualbox-ovf.html.markdown b/website/source/docs/builders/virtualbox-ovf.html.markdown index b9b2de033..0800b14bc 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.markdown +++ b/website/source/docs/builders/virtualbox-ovf.html.markdown @@ -19,13 +19,11 @@ image). When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this: -==> virtualbox-ovf: Progress state: VBOX\_E\_FILE\_ERROR ==> -virtualbox-ovf: VBoxManage: error: Appliance read failed ==> virtualbox-ovf: -VBoxManage: error: Error reading "source.ova": element "Section" has no "type" -attribute, line 21 ==> virtualbox-ovf: VBoxManage: error: Details: code -VBOX\_E\_FILE\_ERROR (0x80bb0004), component Appliance, interface IAppliance -==> virtualbox-ovf: VBoxManage: error: Context: "int -handleImportAppliance(HandlerArg\*)" at line 304 of file VBoxManageAppliance.cpp + ==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR + ==> virtualbox-ovf: VBoxManage: error: Appliance read failed + ==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21 + ==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance + ==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp The builder builds a virtual machine by importing an existing OVF or OVA file. 
It then boots this image, runs provisioners on this new VM, and exports that VM From 6dd0a21c89ff936ff565a2d1e8cee972533ab489 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sun, 26 Jul 2015 16:22:46 -0700 Subject: [PATCH 733/956] Added an artifice post-processor which allows you to override artifacts in a post-processor chain --- plugin/post-processor-artifice/main.go | 15 +++++ post-processor/artifice/artifact.go | 56 +++++++++++++++++ post-processor/artifice/post-processor.go | 60 +++++++++++++++++++ .../artifice/post-processor_test.go | 1 + 4 files changed, 132 insertions(+) create mode 100644 plugin/post-processor-artifice/main.go create mode 100644 post-processor/artifice/artifact.go create mode 100644 post-processor/artifice/post-processor.go create mode 100644 post-processor/artifice/post-processor_test.go diff --git a/plugin/post-processor-artifice/main.go b/plugin/post-processor-artifice/main.go new file mode 100644 index 000000000..c503e1572 --- /dev/null +++ b/plugin/post-processor-artifice/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/post-processor/artifice" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterPostProcessor(new(artifice.PostProcessor)) + server.Serve() +} diff --git a/post-processor/artifice/artifact.go b/post-processor/artifice/artifact.go new file mode 100644 index 000000000..cb344b8e2 --- /dev/null +++ b/post-processor/artifice/artifact.go @@ -0,0 +1,56 @@ +package artifice + +import ( + "fmt" + "os" + "strings" +) + +const BuilderId = "packer.post-processor.artifice" + +type Artifact struct { + files []string +} + +func NewArtifact(files []string) (*Artifact, error) { + for _, f := range files { + if _, err := os.Stat(f); err != nil { + return nil, err + } + } + artifact := &Artifact{ + files: files, + } + return artifact, nil +} + +func (a *Artifact) BuilderId() string { + return BuilderId +} + +func (a *Artifact) Files() []string { + return a.files +} + +func (a *Artifact) Id() string { + return "" +} + +func (a *Artifact) String() string { + files := strings.Join(a.files, ", ") + return fmt.Sprintf("Created artifact from files: %s", files) +} + +func (a *Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + for _, f := range a.files { + err := os.RemoveAll(f) + if err != nil { + return err + } + } + return nil +} diff --git a/post-processor/artifice/post-processor.go b/post-processor/artifice/post-processor.go new file mode 100644 index 000000000..ff33184de --- /dev/null +++ b/post-processor/artifice/post-processor.go @@ -0,0 +1,60 @@ +package artifice + +import ( + "fmt" + "strings" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +// The artifact-override post-processor allows you to specify arbitrary files as +// artifacts. These will override any other artifacts created by the builder. +// This allows you to use a builder and provisioner to create some file, such as +// a compiled binary or tarball, extract it from the builder (VM or container) +// and then save that binary or tarball and throw away the builder. 
+ +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Files []string `mapstructure:"files"` + Keep bool `mapstructure:"keep_input_artifact"` + + ctx interpolate.Context +} + +type PostProcessor struct { + config Config +} + +func (p *PostProcessor) Configure(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &p.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) + if err != nil { + return err + } + + if len(p.config.Files) == 0 { + return fmt.Errorf("No files specified in artifice configuration") + } + + return nil +} + +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + if len(artifact.Files()) > 0 { + ui.Say(fmt.Sprintf("Discarding artifact files: %s", strings.Join(artifact.Files(), ", "))) + } + + artifact, err := NewArtifact(p.config.Files) + ui.Say(fmt.Sprintf("Using these artifact files: %s", strings.Join(artifact.Files(), ", "))) + + return artifact, true, err +} diff --git a/post-processor/artifice/post-processor_test.go b/post-processor/artifice/post-processor_test.go new file mode 100644 index 000000000..7e087e3e8 --- /dev/null +++ b/post-processor/artifice/post-processor_test.go @@ -0,0 +1 @@ +package artifice From 16d7e7542ae8da34f10b5ffb9870d8465e84884e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 7 Aug 2015 20:10:17 -0700 Subject: [PATCH 734/956] Added docs for artifice --- .../post-processors/artifice.html.markdown | 147 ++++++++++++++++++ website/source/layouts/docs.erb | 1 + 2 files changed, 148 insertions(+) create mode 100644 website/source/docs/post-processors/artifice.html.markdown diff --git a/website/source/docs/post-processors/artifice.html.markdown b/website/source/docs/post-processors/artifice.html.markdown new file mode 100644 index 000000000..28255e836 --- /dev/null +++ b/website/source/docs/post-processors/artifice.html.markdown @@ -0,0 +1,147 @@ +--- +description: | + The Atlas post-processor for Packer receives an artifact from a Packer build and + uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version + and distribute them in a simple way. +layout: docs +page_title: 'Atlas Post-Processor' +... + +# Artifice Post-Processor + +\~> This is a beta feature, and may change significantly before it is +finalized. Please open a [GitHub issue to provide +feedback](https://github.com/mitchellh/packer/issues). + +Type: `artifice` + +The artifice post-processor overrides the artifact list from an upstream builder +or post-processor. All downstream post-processors will see the new artifacts you +specify. The primary use-case is to build artifacts inside a packer builder -- +for example, spinning up an EC2 instance to build a docker container -- and then +extracting the docker container and throwing away the EC2 instance. + +After overriding the artifact with artifice, you can use it with other +post-processors like +[compress](https://packer.io/docs/post-processors/compress.html), +[docker-push](https://packer.io/docs/post-processors/docker-push.html), +[Atlas](https://packer.io/docs/post-processors/atlas.html), or a third-party +post-processor. + +Artifice allows you to use the familiar packer workflow to create a fresh, +stateless build environment for each build on the infrastructure of your +choosing. You can use this to build just about anything: buildpacks, containers, +jars, binaries, tarballs, msi installers, and more. 
+ +## Workflow + +Artifice helps you tie together a few other packer features: + +- A builder, which spins up a VM (or container) to build your artifact +- A provisioner, which performs the steps to create your artifact +- A file provisioner, which downloads the artifact from the VM +- The artifice post-processor, which identifies which files have been + downloaded from the VM +- Additional post-processors, which push the artifact to Atlas, Docker + hub, etc. + +You will want to perform as much work as possible inside the VM. Ideally +the only other post-processor you need after artifice is one that uploads your +artifact to the appropriate repository. + +## Configuration + +The configuration allows you to specify which files comprise your artifact. + +### Required: + +- `files` (array of strings) - A list of files that comprise your artifact. + These files must exist on your local disk after the provisioning phase of + packer is complete. These will replace any of the builder's original + artifacts (such as a VM snapshot). + +### Example Configuration + +This minimal example: + +1. Spins up a cloned VMware virtual machine +2. Installs a [consul](https://consul.io/) release +3. Downloads the consul binary +4. Packages it into a `.tar.gz` file +5. Uploads it to Atlas. + +VMX is a fast way to build and test locally, but you can easily substitute another builder. + +``` {.javascript} +{ + "builders": [ + { + "type": "vmware-vmx", + "source_path": "/opt/ubuntu-1404-vmware.vmx", + "ssh_username": "vagrant", + "ssh_password": "vagrant", + "shutdown_command": "sudo shutdown -h now", + "headless":"true", + "skip_compaction":"true" + } + ], + "provisioners": [ + { + "type": "shell", + "inline": [ + "sudo apt-get install -y python-pip", + "sudo pip install ifs", + "sudo ifs install consul --version=0.5.2" + ] + }, + { + "type": "file", + "source": "/usr/local/bin/consul", + "destination": "consul", + "direction": "download" + } + ], + "post-processors": [ + [ + { + "type": "artifice", + "files": ["consul"] + }, + { + "type": "compress", + "output": "consul-0.5.2.tar.gz" + }, + { + "type":"atlas", + "artifact": "hashicorp/consul", + "artifact_type": "archive" + } + ] + ] +} +``` + +**Notice that there are two sets of square brackets in the post-processor +section.** This creates a post-processor chain, where the output of the +proceeding artifact is passed to subsequent post-processors. If you use only one +set of square braces the post-processors will run individually against the build +artifact (the vmx file in this case) and it will not have the desired result. + + "post-processors": [ + [ <--- Start post-processor chain + { + "type": "artifice", + "files": ["consul"] + }, + { + "type": "atlas", + ... + } + ], <--- End post-processor chain + { + "type":"compress" <-- Standalone post-processor + } + ] + +You can create multiple post-processor chains to handle multiple builders (for example, +building linux and windows binaries during the same build). diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 2b8bb8810..0bba9799c 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -69,6 +69,7 @@
[rendered-HTML residue from the website/source/layouts/docs.erb diff; the recoverable change: a new "Artifice" entry is added to the Post-Processors navigation list alongside the existing Atlas, compress, and docker-import entries]
  • From 8484c2e2a05c5a9a22c61d0cdb0df09f612b5691 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 8 Aug 2015 00:51:01 -0700 Subject: [PATCH 735/956] Prepare 0.8.3 --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 84958092f..59a306403 100644 --- a/version.go +++ b/version.go @@ -9,4 +9,4 @@ const Version = "0.8.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From 441695446115309b8a09dc80c94d8ded45d38a9a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 10:25:19 -0700 Subject: [PATCH 736/956] Corrected the meta text on the artifice page. --- .../post-processors/artifice.html.markdown | 29 ++++++++++--------- .../post-processors/compress.html.markdown | 4 +-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/website/source/docs/post-processors/artifice.html.markdown b/website/source/docs/post-processors/artifice.html.markdown index 28255e836..2ee9abc85 100644 --- a/website/source/docs/post-processors/artifice.html.markdown +++ b/website/source/docs/post-processors/artifice.html.markdown @@ -1,8 +1,10 @@ --- description: | - The Atlas post-processor for Packer receives an artifact from a Packer build and - uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to version - and distribute them in a simple way. + The artifice post-processor overrides the artifact list from an upstream builder + or post-processor. All downstream post-processors will see the new artifacts you + specify. The primary use-case is to build artifacts inside a packer builder -- + for example, spinning up an EC2 instance to build a docker container -- and then + extracting the docker container and throwing away the EC2 instance. layout: docs page_title: 'Atlas Post-Processor' ... @@ -45,8 +47,8 @@ Artifice helps you tie together a few other packer features: - Additional post-processors, which push the artifact to Atlas, Docker hub, etc. -You will want to perform as much work as possible inside the VM. Ideally -the only other post-processor you need after artifice is one that uploads your +You will want to perform as much work as possible inside the VM. Ideally the +only other post-processor you need after artifice is one that uploads your artifact to the appropriate repository. ## Configuration @@ -64,13 +66,14 @@ The configuration allows you to specify which files comprise your artifact. This minimal example: -1. Spins up a cloned VMware virtual machine -2. Installs a [consul](https://consul.io/) release -3. Downloads the consul binary -4. Packages it into a `.tar.gz` file -5. Uploads it to Atlas. +1. Spins up a cloned VMware virtual machine +2. Installs a [consul](https://consul.io/) release +3. Downloads the consul binary +4. Packages it into a `.tar.gz` file +5. Uploads it to Atlas. -VMX is a fast way to build and test locally, but you can easily substitute another builder. +VMX is a fast way to build and test locally, but you can easily substitute +another builder. ``` {.javascript} { @@ -143,5 +146,5 @@ artifact (the vmx file in this case) and it will not have the desired result. } ] -You can create multiple post-processor chains to handle multiple builders (for example, -building linux and windows binaries during the same build). 
+You can create multiple post-processor chains to handle multiple builders (for +example, building linux and windows binaries during the same build). diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 3834ffc72..373230d44 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -27,8 +27,8 @@ you will need to specify the `output` option. detected packer defaults to `.tar.gz` behavior but will not change the filename. - You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. - If you are executing multiple builders in parallel you should make sure + You can use `{{.BuildName}}` and `{{.BuilderType}}` in your output path. If + you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}.zip`. - `compression_level` (integer) - Specify the compression level, for From 3a6cac97dd99aecbda7a641b71d1066aa40eb756 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 12:45:20 -0700 Subject: [PATCH 737/956] Added v0.8.3 changelog --- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 172589cd4..55514c156 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,32 @@ +## 0.8.3 (Aug 8, 2015) + +FEATURES: + + * **[Beta] Artifice post-processor:** Override packer artifacts during post- + processing. This allows you to **extract artifacts from a packer builder** + and use them with other post-processors like compress, docker, and Atlas. + +IMPROVEMENTS: + + * Many docs have been updated and corrected; big thanks to our contributors! + * builder/openstack: Add debug logging for IP addresses used for SSH [GH-2513] + * builder/openstack: Add option to use existing SSH keypair [GH-2512] + * builder/openstack: Add support for Glance metadata [GH-2434] + * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for + an exclusive connection [GH-2522] + * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] + +BUG FIXES: + + * builder/openstack: track new IP address discovered during RackConnect [GH-2514] + * post-processor/atlas: atlas_url configuration option works now [GH-2478] + * post-processor/compress: Now supports interpolation in output config [GH-2414] + * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] + * provisioner/salt-masterless: Clarify error messages when we can't create or + write to the temp directory [GH-2518] + * provisioner/salt-masterless: Copy state even if /srv/salt exists already [GH-1699] + * provisioner/salt-masterless: Make sure /etc/salt exists before writing to it [GH-2520] + ## 0.8.2 (July 17, 2015) IMPROVEMENTS: From f1eb95dbe04f9c6f3b7fe28d08a45211c8d6e17f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 13:30:31 -0700 Subject: [PATCH 738/956] Remove extra emphasis --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55514c156..1b04e6722 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,8 @@ FEATURES: - * **[Beta] Artifice post-processor:** Override packer artifacts during post- - processing. This allows you to **extract artifacts from a packer builder** + * **[Beta]** Artifice post-processor: Override packer artifacts during post- + processing. 
This allows you to extract artifacts from a packer builder and use them with other post-processors like compress, docker, and Atlas. IMPROVEMENTS: From 4cc443da8ecc3a20801f4b8cdbe19e8302f66495 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 13:59:56 -0700 Subject: [PATCH 739/956] Update use of ec2rolecreds to match upstream --- builder/amazon/common/access_config.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/access_config.go b/builder/amazon/common/access_config.go index 88bda0423..dccde08d4 100644 --- a/builder/amazon/common/access_config.go +++ b/builder/amazon/common/access_config.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/mitchellh/packer/template/interpolate" ) @@ -31,7 +32,7 @@ func (c *AccessConfig) Config() (*aws.Config, error) { }}, &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &credentials.EC2RoleProvider{}, + &ec2rolecreds.EC2RoleProvider{}, }) region, err := c.Region() From fce6f86328e79dcb9908a49fb4ab59532d5a2312 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 14:19:20 -0700 Subject: [PATCH 740/956] Updated changelog with some missing changes --- CHANGELOG.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b04e6722..226c23f22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## 0.8.3 (Aug 8, 2015) +BACKWARDS INCOMPATIBILITIES: + + * VMware VMX options are no longer lowercased internally. This is to support + the virtualSSD option which is case-sensitive. See [GH-2309] for details. + FEATURES: * **[Beta]** Artifice post-processor: Override packer artifacts during post- @@ -14,11 +19,17 @@ IMPROVEMENTS: * builder/openstack: Add support for Glance metadata [GH-2434] * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for an exclusive connection [GH-2522] + * builder/vmware: Add support for virtualSSD option [GH-2309] * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] BUG FIXES: - * builder/openstack: track new IP address discovered during RackConnect [GH-2514] + * builder/amazon: Improve instance cleanup by storing id sooner [GH-2404] + * builder/amazon: Only fetch windows password when using WinRM communicator [GH-2538] + * builder/openstack: Support IPv6 SSH address [GH-2450] + * builder/openstack: Track new IP address discovered during RackConnect [GH-2514] + * builder/qemu: Add 100ms delay between VNC key events. 
[GH-2415] + * builder/vmware: Don't force lowercase all VMX options [GH-2309] * post-processor/atlas: atlas_url configuration option works now [GH-2478] * post-processor/compress: Now supports interpolation in output config [GH-2414] * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] @@ -26,6 +37,8 @@ BUG FIXES: write to the temp directory [GH-2518] * provisioner/salt-masterless: Copy state even if /srv/salt exists already [GH-1699] * provisioner/salt-masterless: Make sure /etc/salt exists before writing to it [GH-2520] + * provisioner/winrm: Connect to the correct port when using NAT with + VirtualBox / VMware [GH-2399] ## 0.8.2 (July 17, 2015) From 313fcaf0ff85bc759bac5cc19c77307d53122e9e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 14:52:34 -0700 Subject: [PATCH 741/956] Revert backwards-compatibility break in VMX option casing PR #2309 introduced case-sensitive options in VMX files. This is to support a case-sensitive option called `virtualSSD`. The change made all options case-sensitive, which causes problems with external VMX options provided in user templates. To prevent breakage, this change is being reverted. - Fixes #2574 - Reverts #2542 - Reverts #2309 --- builder/vmware/common/ssh.go | 2 +- builder/vmware/common/step_clean_vmx.go | 9 +++++---- builder/vmware/common/step_clean_vmx_test.go | 18 +++++++++--------- builder/vmware/common/step_configure_vmx.go | 2 ++ builder/vmware/common/vmx.go | 20 ++++---------------- builder/vmware/vmx/step_clone_vmx.go | 12 ++++++------ 6 files changed, 27 insertions(+), 36 deletions(-) diff --git a/builder/vmware/common/ssh.go b/builder/vmware/common/ssh.go index 9db075a71..86e184bb5 100644 --- a/builder/vmware/common/ssh.go +++ b/builder/vmware/common/ssh.go @@ -39,7 +39,7 @@ func CommHost(config *SSHConfig) func(multistep.StateBag) (string, error) { var ok bool macAddress := "" if macAddress, ok = vmxData["ethernet0.address"]; !ok || macAddress == "" { - if macAddress, ok = vmxData["ethernet0.generatedAddress"]; !ok || macAddress == "" { + if macAddress, ok = vmxData["ethernet0.generatedaddress"]; !ok || macAddress == "" { return "", errors.New("couldn't find MAC address in VMX") } } diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go index 44bf4c407..e9bc51987 100755 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -2,11 +2,12 @@ package common import ( "fmt" - "github.com/mitchellh/multistep" - "github.com/mitchellh/packer/packer" "log" "regexp" "strings" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" ) // This step cleans up the VMX by removing or changing this prior to @@ -51,8 +52,8 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { ui.Message("Detaching ISO from CD-ROM device...") - vmxData[ide+"deviceType"] = "cdrom-raw" - vmxData[ide+"fileName"] = "auto detect" + vmxData[ide+"devicetype"] = "cdrom-raw" + vmxData[ide+"filename"] = "auto detect" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/common/step_clean_vmx_test.go b/builder/vmware/common/step_clean_vmx_test.go index 3ca6a7e23..ea30fb54a 100755 --- a/builder/vmware/common/step_clean_vmx_test.go +++ b/builder/vmware/common/step_clean_vmx_test.go @@ -61,8 +61,8 @@ func TestStepCleanVMX_floppyPath(t *testing.T) { Value string }{ {"floppy0.present", "FALSE"}, - {"floppy0.fileType", ""}, - {"floppy0.fileName", ""}, + {"floppy0.filetype", ""}, + {"floppy0.filename", 
""}, } for _, tc := range cases { @@ -109,9 +109,9 @@ func TestStepCleanVMX_isoPath(t *testing.T) { Key string Value string }{ - {"ide0:0.fileName", "auto detect"}, - {"ide0:0.deviceType", "cdrom-raw"}, - {"ide0:1.fileName", "bar"}, + {"ide0:0.filename", "auto detect"}, + {"ide0:0.devicetype", "cdrom-raw"}, + {"ide0:1.filename", "bar"}, {"foo", "bar"}, } @@ -130,12 +130,12 @@ func TestStepCleanVMX_isoPath(t *testing.T) { const testVMXFloppyPath = ` floppy0.present = "TRUE" -floppy0.fileType = "file" +floppy0.filetype = "file" ` const testVMXISOPath = ` -ide0:0.deviceType = "cdrom-image" -ide0:0.fileName = "foo" -ide0:1.fileName = "bar" +ide0:0.devicetype = "cdrom-image" +ide0:0.filename = "foo" +ide0:1.filename = "bar" foo = "bar" ` diff --git a/builder/vmware/common/step_configure_vmx.go b/builder/vmware/common/step_configure_vmx.go index 14c68e76a..401d53055 100755 --- a/builder/vmware/common/step_configure_vmx.go +++ b/builder/vmware/common/step_configure_vmx.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "log" "regexp" + "strings" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -52,6 +53,7 @@ func (s *StepConfigureVMX) Run(state multistep.StateBag) multistep.StepAction { // Set custom data for k, v := range s.CustomData { log.Printf("Setting VMX: '%s' = '%s'", k, v) + k = strings.ToLower(k) vmxData[k] = v } diff --git a/builder/vmware/common/vmx.go b/builder/vmware/common/vmx.go index ab0291807..e7cdb662f 100755 --- a/builder/vmware/common/vmx.go +++ b/builder/vmware/common/vmx.go @@ -17,7 +17,7 @@ import ( func ParseVMX(contents string) map[string]string { results := make(map[string]string) - lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"?(.*?)"?\s*$`) + lineRe := regexp.MustCompile(`^(.+?)\s*=\s*"(.*?)"\s*$`) for _, line := range strings.Split(contents, "\n") { matches := lineRe.FindStringSubmatch(line) @@ -25,7 +25,8 @@ func ParseVMX(contents string) map[string]string { continue } - results[matches[1]] = matches[2] + key := strings.ToLower(matches[1]) + results[key] = matches[2] } return results @@ -42,22 +43,9 @@ func EncodeVMX(contents map[string]string) string { i++ } - // a list of VMX key fragments that should not be wrapped in quotes, - // fragments because multiple disks can use the virtualSSD suffix - noQuotes := []string { - "virtualSSD", - } - sort.Strings(keys) for _, k := range keys { - pat := "%s = \"%s\"\n" - for _, q := range noQuotes { - if strings.Contains(k, q) { - pat = "%s = %s\n" - break; - } - } - buf.WriteString(fmt.Sprintf(pat, k, contents[k])) + buf.WriteString(fmt.Sprintf("%s = \"%s\"\n", k, contents[k])) } return buf.String() diff --git a/builder/vmware/vmx/step_clone_vmx.go b/builder/vmware/vmx/step_clone_vmx.go index 1dbae678a..a020e1627 100755 --- a/builder/vmware/vmx/step_clone_vmx.go +++ b/builder/vmware/vmx/step_clone_vmx.go @@ -38,14 +38,14 @@ func (s *StepCloneVMX) Run(state multistep.StateBag) multistep.StepAction { } var diskName string - if _, ok := vmxData["scsi0:0.fileName"]; ok { - diskName = vmxData["scsi0:0.fileName"] + if _, ok := vmxData["scsi0:0.filename"]; ok { + diskName = vmxData["scsi0:0.filename"] } - if _, ok := vmxData["sata0:0.fileName"]; ok { - diskName = vmxData["sata0:0.fileName"] + if _, ok := vmxData["sata0:0.filename"]; ok { + diskName = vmxData["sata0:0.filename"] } - if _, ok := vmxData["ide0:0.fileName"]; ok { - diskName = vmxData["ide0:0.fileName"] + if _, ok := vmxData["ide0:0.filename"]; ok { + diskName = vmxData["ide0:0.filename"] } if diskName == "" { err := fmt.Errorf("Root disk filename could not 
be found!") From 27e7a02e6281f2c618a0c6db734104e7261de552 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 18:04:47 -0700 Subject: [PATCH 742/956] Replace v0.8.3 changelog with v0.8.4. --- CHANGELOG.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226c23f22..6b9ee3681 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,4 @@ -## 0.8.3 (Aug 8, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * VMware VMX options are no longer lowercased internally. This is to support - the virtualSSD option which is case-sensitive. See [GH-2309] for details. +## 0.8.4 (Aug 10, 2015) FEATURES: @@ -19,7 +14,6 @@ IMPROVEMENTS: * builder/openstack: Add support for Glance metadata [GH-2434] * builder/qemu and builder/vmware: Packer's VNC connection no longer asks for an exclusive connection [GH-2522] - * builder/vmware: Add support for virtualSSD option [GH-2309] * provisioner/salt-masterless: Can now customize salt remote directories [GH-2519] BUG FIXES: @@ -29,7 +23,6 @@ BUG FIXES: * builder/openstack: Support IPv6 SSH address [GH-2450] * builder/openstack: Track new IP address discovered during RackConnect [GH-2514] * builder/qemu: Add 100ms delay between VNC key events. [GH-2415] - * builder/vmware: Don't force lowercase all VMX options [GH-2309] * post-processor/atlas: atlas_url configuration option works now [GH-2478] * post-processor/compress: Now supports interpolation in output config [GH-2414] * provisioner/powershell: Elevated runs now receive environment variables [GH-2378] From 60bbe850ef0b7fec19eba1929d83e7267ca1572b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 10 Aug 2015 18:30:33 -0700 Subject: [PATCH 743/956] Bump version to v0.8.5 --- CHANGELOG.md | 4 +++- version.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b9ee3681..90b5bed8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.8.4 (Aug 10, 2015) +## 0.8.5 (Aug 10, 2015) FEATURES: @@ -33,6 +33,8 @@ BUG FIXES: * provisioner/winrm: Connect to the correct port when using NAT with VirtualBox / VMware [GH-2399] +Note: 0.8.3 was pulled and 0.8.4 was skipped. + ## 0.8.2 (July 17, 2015) IMPROVEMENTS: diff --git a/version.go b/version.go index 59a306403..1aaa4c8df 100644 --- a/version.go +++ b/version.go @@ -4,7 +4,7 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.3" +const Version = "0.8.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release From 5a6bcdeb7899c6518a60aba700308dbf7b92a587 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 12 Aug 2015 01:34:08 +0200 Subject: [PATCH 744/956] Fix interpolation of {{.Flavor}} in parallels_tools_guest_path. 
Fixes [GH-2543] --- .../step_upload_parallels_tools_test.go | 86 +++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 builder/parallels/common/step_upload_parallels_tools_test.go diff --git a/builder/parallels/common/step_upload_parallels_tools_test.go b/builder/parallels/common/step_upload_parallels_tools_test.go new file mode 100644 index 000000000..0599912a9 --- /dev/null +++ b/builder/parallels/common/step_upload_parallels_tools_test.go @@ -0,0 +1,86 @@ +package common + +import ( + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "testing" +) + +func TestStepUploadParallelsTools_impl(t *testing.T) { + var _ multistep.Step = new(StepUploadParallelsTools) +} + +func TestStepUploadParallelsTools(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "upload" + step.ParallelsToolsGuestPath = "/tmp/prl-lin.iso" + step.ParallelsToolsFlavor = "lin" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadPath != "/tmp/prl-lin.iso" { + t.Fatalf("bad: %#v", comm.UploadPath) + } +} + +func TestStepUploadParallelsTools_interpolate(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "upload" + step.ParallelsToolsGuestPath = "/tmp/prl-{{ .Flavor }}.iso" + step.ParallelsToolsFlavor = "win" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadPath != "/tmp/prl-win.iso" { + t.Fatalf("bad: %#v", comm.UploadPath) + } +} + +func TestStepUploadParallelsTools_attach(t *testing.T) { + state := testState(t) + state.Put("parallels_tools_path", "./step_upload_parallels_tools_test.go") + step := new(StepUploadParallelsTools) + step.ParallelsToolsMode = "attach" + step.ParallelsToolsGuestPath = "/tmp/prl-lin.iso" + step.ParallelsToolsFlavor = "lin" + + comm := new(packer.MockCommunicator) + state.Put("communicator", comm) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Verify + if comm.UploadCalled { + t.Fatal("bad") + } +} From d9a0f059262d8199bcd5d4b36f460264be380485 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 17:10:34 -0700 Subject: [PATCH 745/956] Bump version.go to reflect dev status --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index 1aaa4c8df..a32442840 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.5" +const Version = "0.8.6" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. 
Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From dc3c55cf8e7366514018b4c065a40dc18917bd56 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 22:22:52 -0700 Subject: [PATCH 746/956] Implemented downloader for the docker communicator so we can pull files out of a container --- builder/docker/communicator.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 4fcd9b658..31ccc2579 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -194,8 +194,36 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error return nil } +// Download pulls a file out of a container using `docker cp`. We have a source +// path and want to write to an io.Writer, not a file. We use - to make docker +// cp to write to stdout, and then copy the stream to our destination io.Writer. func (c *Communicator) Download(src string, dst io.Writer) error { - panic("not implemented") + + log.Printf("Downloading file from container: %s:%s", c.ContainerId, src) + localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-") + + pipe, err := localCmd.StdoutPipe() + if err != nil { + return fmt.Errorf("Failed to open pipe: %s", err) + } + + err = localCmd.Start() + if err != nil { + return fmt.Errorf("Failed to start download: %s", err) + } + + numBytes, err := io.Copy(dst, pipe) + if err != nil { + return fmt.Errorf("Failed to pipe download: %s", err) + } else { + log.Printf("Copied %d bytes for %s", numBytes, src) + } + + if err = localCmd.Wait(); err != nil { + return fmt.Errorf("Failed to download '%s' from container: %s", src, err) + } + + return nil } // canExec tells us whether `docker exec` is supported From 047382eec9e0cc39e5dbdd9ecd46fb73c7943f91 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 11 Aug 2015 22:30:19 -0700 Subject: [PATCH 747/956] Style tweak --- builder/docker/communicator.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 31ccc2579..38126366c 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -207,17 +207,15 @@ func (c *Communicator) Download(src string, dst io.Writer) error { return fmt.Errorf("Failed to open pipe: %s", err) } - err = localCmd.Start() - if err != nil { + if err = localCmd.Start(); err != nil { return fmt.Errorf("Failed to start download: %s", err) } numBytes, err := io.Copy(dst, pipe) if err != nil { return fmt.Errorf("Failed to pipe download: %s", err) - } else { - log.Printf("Copied %d bytes for %s", numBytes, src) } + log.Printf("Copied %d bytes for %s", numBytes, src) if err = localCmd.Wait(); err != nil { return fmt.Errorf("Failed to download '%s' from container: %s", src, err) From da82ff3fd687e4b2c79652a09f382a4ecde5175d Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 12 Aug 2015 10:28:06 +0200 Subject: [PATCH 748/956] Fix interpolation of {{.Flavor}} in parallels_tools_guest_path. (2) Actually fix the error... 
Fixes [GH-2543] --- builder/parallels/iso/builder.go | 2 +- builder/parallels/pvm/config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 4a75b0b47..6b731544d 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -64,7 +64,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { Exclude: []string{ "boot_command", "prlctl", - "parallel_tools_guest_path", + "parallels_tools_guest_path", }, }, }, raws...) diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index f03584bf2..89c3ec1f9 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -41,7 +41,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { Exclude: []string{ "boot_command", "prlctl", - "parallel_tools_guest_path", + "parallels_tools_guest_path", }, }, }, raws...) From 3523ffdce14b6e27b640b959f405fd972ca8a22e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:10 -0700 Subject: [PATCH 749/956] Farewell extra line. You were pretty but out of place. --- builder/docker/communicator.go | 1 - 1 file changed, 1 deletion(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 38126366c..8af54bdfe 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -198,7 +198,6 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error // path and want to write to an io.Writer, not a file. We use - to make docker // cp to write to stdout, and then copy the stream to our destination io.Writer. func (c *Communicator) Download(src string, dst io.Writer) error { - log.Printf("Downloading file from container: %s:%s", c.ContainerId, src) localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-") From de9ecd2d62cc0cc38dc66293cd98e3599e6703f5 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:33 -0700 Subject: [PATCH 750/956] Add a test fixture file --- builder/docker/test-fixtures/cake | 1 + 1 file changed, 1 insertion(+) create mode 100644 builder/docker/test-fixtures/cake diff --git a/builder/docker/test-fixtures/cake b/builder/docker/test-fixtures/cake new file mode 100644 index 000000000..63d40b126 --- /dev/null +++ b/builder/docker/test-fixtures/cake @@ -0,0 +1 @@ +chocolate cake is delicious From 62c5e8358d4045e5ee1ba64956e3536a5952bb4d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:36:56 -0700 Subject: [PATCH 751/956] Added a test for docker upload and download --- builder/docker/communicator_test.go | 117 +++++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index f75a89d96..221356723 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -1,10 +1,125 @@ package docker import ( - "github.com/mitchellh/packer/packer" + "crypto/sha256" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" "testing" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/provisioner/file" + "github.com/mitchellh/packer/template" ) func TestCommunicator_impl(t *testing.T) { var _ packer.Communicator = new(Communicator) } + +func TestUploadDownload(t *testing.T) { + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + tpl, err := 
template.Parse(strings.NewReader(dockerBuilderConfig)) + if err != nil { + t.Fatalf("Unable to parse config: %s", err) + } + + // Make sure we only run this on linux hosts + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1") + } + if runtime.GOOS != "linux" { + t.Skip("This test is only supported on linux") + } + cmd := exec.Command("docker", "-v") + cmd.Run() + if !cmd.ProcessState.Success() { + t.Error("docker command not found; please make sure docker is installed") + } + + // Setup the builder + builder := &Builder{} + warnings, err := builder.Prepare(tpl.Builders["docker"].Config) + if err != nil { + t.Fatalf("Error preparing configuration %s", err) + } + if len(warnings) > 0 { + t.Fatal("Encountered configuration warnings; aborting") + } + + // Setup the provisioners + upload := &file.Provisioner{} + err = upload.Prepare(tpl.Provisioners[0].Config) + if err != nil { + t.Fatalf("Error preparing upload: %s", err) + } + download := &file.Provisioner{} + err = download.Prepare(tpl.Provisioners[1].Config) + if err != nil { + t.Fatalf("Error preparing download: %s", err) + } + // Preemptive cleanup + defer os.Remove("delicious-cake") + + // Add hooks so the provisioners run during the build + hooks := map[string][]packer.Hook{} + hooks[packer.HookProvision] = []packer.Hook{ + &packer.ProvisionHook{ + Provisioners: []packer.Provisioner{ + upload, + download, + }, + }, + } + hook := &packer.DispatchHook{Mapping: hooks} + + // Run things + artifact, err := builder.Run(ui, hook, cache) + if err != nil { + t.Fatalf("Error running build %s", err) + } + // Preemptive cleanup + defer artifact.Destroy() + + // Verify that the thing we downloaded is the same thing we sent up. + inputFile, err := ioutil.ReadFile("test-fixtures/cake") + if err != nil { + t.Fatalf("Unable to read input file: %s", err) + } + outputFile, err := ioutil.ReadFile("delicious-cake") + if err != nil { + t.Fatalf("Unable to read output file: %s", err) + } + if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { + t.Fatalf("Input and output files do not match\nInput:\n%s\nOutput:\n%s\n", inputFile, outputFile) + } +} + +const dockerBuilderConfig = ` +{ + "builders": [ + { + "type": "docker", + "image": "alpine", + "export_path": "alpine.tar", + "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"] + } + ], + "provisioners": [ + { + "type": "file", + "source": "test-fixtures/cake", + "destination": "/chocolate-cake" + }, + { + "type": "file", + "source": "/chocolate-cake", + "destination": "delicious-cake", + "direction": "download" + } + ] +} +` From 8cdd07895217f11dc2b8ac34082fc7dc01ed733b Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 11:48:47 -0700 Subject: [PATCH 752/956] Changed fixtures so we can do a directory test too --- builder/docker/test-fixtures/cake | 1 - builder/docker/test-fixtures/manycakes/chocolate | 1 + builder/docker/test-fixtures/manycakes/vanilla | 1 + builder/docker/test-fixtures/onecakes/strawberry | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) delete mode 100644 builder/docker/test-fixtures/cake create mode 100644 builder/docker/test-fixtures/manycakes/chocolate create mode 100644 builder/docker/test-fixtures/manycakes/vanilla create mode 100644 builder/docker/test-fixtures/onecakes/strawberry diff --git a/builder/docker/test-fixtures/cake b/builder/docker/test-fixtures/cake deleted file mode 100644 index 63d40b126..000000000 --- a/builder/docker/test-fixtures/cake +++ /dev/null @@ -1 +0,0 @@ -chocolate cake is delicious diff 
--git a/builder/docker/test-fixtures/manycakes/chocolate b/builder/docker/test-fixtures/manycakes/chocolate new file mode 100644 index 000000000..a2286c928 --- /dev/null +++ b/builder/docker/test-fixtures/manycakes/chocolate @@ -0,0 +1 @@ +chocolate! diff --git a/builder/docker/test-fixtures/manycakes/vanilla b/builder/docker/test-fixtures/manycakes/vanilla new file mode 100644 index 000000000..000a45578 --- /dev/null +++ b/builder/docker/test-fixtures/manycakes/vanilla @@ -0,0 +1 @@ +vanilla! diff --git a/builder/docker/test-fixtures/onecakes/strawberry b/builder/docker/test-fixtures/onecakes/strawberry new file mode 100644 index 000000000..b663de3a9 --- /dev/null +++ b/builder/docker/test-fixtures/onecakes/strawberry @@ -0,0 +1 @@ +strawberry! From 5ad4b0e97e186f5414045c1aa42b313fb3e6df65 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 12:16:26 -0700 Subject: [PATCH 753/956] Added tests and handle the tar format from docker cp - --- builder/docker/communicator.go | 12 +++++++++++- builder/docker/communicator_test.go | 22 +++++++++++++--------- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/builder/docker/communicator.go b/builder/docker/communicator.go index 8af54bdfe..fb88a4491 100644 --- a/builder/docker/communicator.go +++ b/builder/docker/communicator.go @@ -1,6 +1,7 @@ package docker import ( + "archive/tar" "bytes" "fmt" "io" @@ -210,7 +211,16 @@ func (c *Communicator) Download(src string, dst io.Writer) error { return fmt.Errorf("Failed to start download: %s", err) } - numBytes, err := io.Copy(dst, pipe) + // When you use - to send docker cp to stdout it is streamed as a tar; this + // enables it to work with directories. We don't actually support + // directories in Download() but we still need to handle the tar format. + archive := tar.NewReader(pipe) + _, err = archive.Next() + if err != nil { + return fmt.Errorf("Failed to read header from tar stream: %s", err) + } + + numBytes, err := io.Copy(dst, archive) if err != nil { return fmt.Errorf("Failed to pipe download: %s", err) } diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index 221356723..db0bfcfe8 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -61,8 +61,10 @@ func TestUploadDownload(t *testing.T) { if err != nil { t.Fatalf("Error preparing download: %s", err) } - // Preemptive cleanup - defer os.Remove("delicious-cake") + // Preemptive cleanup. Honestly I don't know why you would want to get rid + // of my strawberry cake. It's so tasty! Do you not like cake? Are you a + // cake-hater? Or are you keeping all the cake all for yourself? So selfish! + defer os.Remove("my-strawberry-cake") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} @@ -85,16 +87,18 @@ func TestUploadDownload(t *testing.T) { defer artifact.Destroy() // Verify that the thing we downloaded is the same thing we sent up. - inputFile, err := ioutil.ReadFile("test-fixtures/cake") + // Complain loudly if it isn't. 
+ inputFile, err := ioutil.ReadFile("test-fixtures/onecakes/strawberry") if err != nil { t.Fatalf("Unable to read input file: %s", err) } - outputFile, err := ioutil.ReadFile("delicious-cake") + outputFile, err := ioutil.ReadFile("my-strawberry-cake") if err != nil { t.Fatalf("Unable to read output file: %s", err) } if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { - t.Fatalf("Input and output files do not match\nInput:\n%s\nOutput:\n%s\n", inputFile, outputFile) + t.Fatalf("Input and output files do not match\n"+ + "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile) } } @@ -111,13 +115,13 @@ const dockerBuilderConfig = ` "provisioners": [ { "type": "file", - "source": "test-fixtures/cake", - "destination": "/chocolate-cake" + "source": "test-fixtures/onecakes/strawberry", + "destination": "/strawberry-cake" }, { "type": "file", - "source": "/chocolate-cake", - "destination": "delicious-cake", + "source": "/strawberry-cake", + "destination": "my-strawberry-cake", "direction": "download" } ] From 9ee07f1e8dfae4a06bfaa176ceda61ef414c4a28 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 12:34:52 -0700 Subject: [PATCH 754/956] Add parallel gzip compression to the vagrant post-processor --- post-processor/vagrant/util.go | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/post-processor/vagrant/util.go b/post-processor/vagrant/util.go index a9e745fe6..6ae2c337f 100644 --- a/post-processor/vagrant/util.go +++ b/post-processor/vagrant/util.go @@ -3,14 +3,23 @@ package vagrant import ( "archive/tar" "compress/flate" - "compress/gzip" "encoding/json" "fmt" - "github.com/mitchellh/packer/packer" "io" "log" "os" "path/filepath" + "runtime" + + "github.com/klauspost/pgzip" + "github.com/mitchellh/packer/packer" +) + +var ( + // ErrInvalidCompressionLevel is returned when the compression level passed + // to gzip is not in the expected range. See compress/flate for details. + ErrInvalidCompressionLevel = fmt.Errorf( + "Invalid compression level. Expected an integer from -1 to 9.") ) // Copies a file by copying the contents of the file to another place. @@ -60,10 +69,10 @@ func DirToBox(dst, dir string, ui packer.Ui, level int) error { } defer dstF.Close() - var dstWriter io.Writer = dstF + var dstWriter io.WriteCloser = dstF if level != flate.NoCompression { log.Printf("Compressing with gzip compression level: %d", level) - gzipWriter, err := gzip.NewWriterLevel(dstWriter, level) + gzipWriter, err := makePgzipWriter(dstWriter, level) if err != nil { return err } @@ -143,3 +152,12 @@ func WriteMetadata(dir string, contents interface{}) error { return nil } + +func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) { + gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel) + if err != nil { + return nil, ErrInvalidCompressionLevel + } + gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) + return gzipWriter, nil +} From 641c8a2ea0f972d2ade2aaac5c3289b3c119ea09 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 12 Aug 2015 14:08:00 -0700 Subject: [PATCH 755/956] Added changelog entries for recently-merged features and fixes --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90b5bed8b..90529ef1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## (Unreleased) + +IMPROVEMENTS: + + * builder/docker: Now supports Download so it can be used with the file + provisioner to download a file from a container. 
+    [GH-2585]
+  * post-processor/vagrant: Like the compress post-processor, vagrant now uses a
+    parallel gzip algorithm to compress vagrant boxes. [GH-2590]
+
+BUG FIXES:
+
+  * builder/parallels: Fix interpolation in parallels_tools_guest_path [GH-2543]
+
 ## 0.8.5 (Aug 10, 2015)
 
 FEATURES:

From 70af28be47256c51beffcc3e37ee0a569b8b4b14 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 14 Aug 2015 17:34:04 -0700
Subject: [PATCH 756/956] Added cake fixture for testing file:/// downloads

---
 common/test-fixtures/fileurl/cake | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 common/test-fixtures/fileurl/cake

diff --git a/common/test-fixtures/fileurl/cake b/common/test-fixtures/fileurl/cake
new file mode 100644
index 000000000..e800d1ffb
--- /dev/null
+++ b/common/test-fixtures/fileurl/cake
@@ -0,0 +1 @@
+delicious chocolate cake

From 424ee658669a465cd92b42ef030b245778facd93 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 14 Aug 2015 17:34:39 -0700
Subject: [PATCH 757/956] Added a log message when we use a local file instead
 of downloading one

---
 common/download.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/common/download.go b/common/download.go
index 16c0724c3..184624ffe 100644
--- a/common/download.go
+++ b/common/download.go
@@ -117,6 +117,7 @@ func (d *DownloadClient) Get() (string, error) {
 	var finalPath string
 	if url.Scheme == "file" && !d.config.CopyFile {
 		finalPath = url.Path
+		log.Printf("Using local file: %s", finalPath)
 
 		// Remove forward slash on absolute Windows file URLs before processing
 		if runtime.GOOS == "windows" && len(finalPath) > 0 && finalPath[0] == '/' {

From 7ecfb057ff551ce2388bee36354a16f57b4ee4ef Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 14 Aug 2015 17:37:57 -0700
Subject: [PATCH 758/956] Added test case to catch deleting local source file
 when checksum doesn't match

---
 common/download_test.go | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/common/download_test.go b/common/download_test.go
index dc5bd29ed..f77956913 100644
--- a/common/download_test.go
+++ b/common/download_test.go
@@ -3,6 +3,7 @@ package common
 import (
 	"crypto/md5"
 	"encoding/hex"
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
@@ -338,3 +339,40 @@ func TestHashForType(t *testing.T) {
 		t.Fatalf("fake hash is not nil")
 	}
 }
+
+func TestDownloadFileUrl(t *testing.T) {
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Unable to detect working directory: %s", err)
+	}
+
+	// source_path is a file path and source is a network path
+	sourcePath := fmt.Sprintf("%s/test-fixtures/fileurl/%s", cwd, "cake")
+	source := fmt.Sprintf("file://" + sourcePath)
+	t.Logf("Trying to download %s", source)
+
+	config := &DownloadConfig{
+		Url: source,
+		// This should be wrong. We want to make sure we don't delete
+		// the source file when the checksum fails to match.
+		Checksum: []byte("nope"),
+		Hash:     HashForType("sha256"),
+		CopyFile: false,
+	}
+
+	client := NewDownloadClient(config)
+
+	filename, err := client.Get()
+	defer os.Remove(config.TargetPath)
+	if err != nil {
+		t.Fatalf("Failed to download test file")
+	}
+
+	if sourcePath != filename {
+		t.Errorf("Filename doesn't match; expected %s got %s", sourcePath, filename)
+	}
+
+	if _, err = os.Stat(sourcePath); err != nil {
+		t.Errorf("Could not stat source file: %s", sourcePath)
+	}
+
+}

From 6e8c6a15ad01cd9dde6e0ea08f93132c7778658d Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 14 Aug 2015 17:49:08 -0700
Subject: [PATCH 759/956] Implement fix, add comments so it's more apparent
 why we're doing special logic

---
 common/download.go      | 11 +++++++++--
 common/download_test.go | 16 ++++++++--------
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/common/download.go b/common/download.go
index 184624ffe..5f213968b 100644
--- a/common/download.go
+++ b/common/download.go
@@ -115,7 +115,10 @@ func (d *DownloadClient) Get() (string, error) {
 	// Files when we don't copy the file are special cased.
 	var f *os.File
 	var finalPath string
+	sourcePath := ""
 	if url.Scheme == "file" && !d.config.CopyFile {
+		// This is a special case where we use a source file that already exists
+		// locally and we don't make a copy. Normally we would copy or download.
 		finalPath = url.Path
 		log.Printf("Using local file: %s", finalPath)
 
@@ -123,6 +126,8 @@ func (d *DownloadClient) Get() (string, error) {
 		if runtime.GOOS == "windows" && len(finalPath) > 0 && finalPath[0] == '/' {
 			finalPath = finalPath[1:len(finalPath)]
 		}
+		// Keep track of the source so we can make sure not to delete this later
+		sourcePath = finalPath
 	} else {
 		finalPath = d.config.TargetPath
 
@@ -150,8 +155,10 @@ func (d *DownloadClient) Get() (string, error) {
 		var verify bool
 		verify, err = d.VerifyChecksum(finalPath)
 		if err == nil && !verify {
-			// Delete the file
-			os.Remove(finalPath)
+			// Only delete the file if we made a copy or downloaded it
+			if sourcePath != finalPath {
+				os.Remove(finalPath)
+			}
 
 			err = fmt.Errorf(
 				"checksums didn't match expected: %s",
diff --git a/common/download_test.go b/common/download_test.go
index f77956913..51f6f270c 100644
--- a/common/download_test.go
+++ b/common/download_test.go
@@ -340,6 +340,10 @@ func TestHashForType(t *testing.T) {
 	}
 }
 
+// TestDownloadFileUrl tests a special case where we use a local file for
+// iso_url. In this case we can still verify the checksum but we should not
+// delete the file if the checksum fails. Instead we'll just error and let the
+// user fix the checksum.
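The guard added above is the heart of the fix. As a minimal, self-contained sketch (the function and its names are illustrative, not the actual DownloadClient internals):

``` {.go}
package download

import "os"

// cleanupOnBadChecksum removes a file that failed checksum verification,
// but only if it was downloaded or copied. A file:// source that is used
// in place is left untouched so the user can correct the checksum instead.
func cleanupOnBadChecksum(finalPath, sourcePath string) {
	if sourcePath != finalPath {
		os.Remove(finalPath)
	}
}
```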
func TestDownloadFileUrl(t *testing.T) { cwd, err := os.Getwd() if err != nil { @@ -361,14 +365,10 @@ func TestDownloadFileUrl(t *testing.T) { client := NewDownloadClient(config) - filename, err := client.Get() - defer os.Remove(config.TargetPath) - if err != nil { - t.Fatalf("Failed to download test file") - } - - if sourcePath != filename { - t.Errorf("Filename doesn't match; expected %s got %s", sourcePath, filename) + // Verify that we fail to match the checksum + _, err = client.Get() + if err.Error() != "checksums didn't match expected: 6e6f7065" { + t.Fatalf("Unexpected failure; expected checksum not to match") } if _, err = os.Stat(sourcePath); err != nil { From f54940d9e815522138effddc8363af2d04bcf5fc Mon Sep 17 00:00:00 2001 From: Alvaro Miranda Date: Mon, 17 Aug 2015 22:15:26 +1200 Subject: [PATCH 760/956] note about floppy on ESXi note about floppy on ESXi --- .../docs/builders/vmware-iso.html.markdown | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 594d77cee..b9580ab1c 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -393,3 +393,19 @@ modify as well: - `remote_username` - The SSH username used to access the remote machine. - `remote_password` - The SSH password for access to the remote machine. + +### Using a floppy for linux kickstart file or preseed + +Once the vm has been started, and the boot process is in place, sometimes a response file is +required. For ESXi enviroment, sometimes is easier use a floopy disk: + +ie RedHat: + +``` {.text} + "floppy_files": [ + "folder/ks.cfg" + ], + .. + "boot_command": " text ks=floppy " +``` + From d96b426a6d74ada2d496f5062b94bf5ea65497ce Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 17 Aug 2015 14:56:26 -0700 Subject: [PATCH 761/956] Clarify use-case and example for floppy preseed --- .../docs/builders/vmware-iso.html.markdown | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index b9580ab1c..39678fd5f 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -394,18 +394,22 @@ modify as well: - `remote_password` - The SSH password for access to the remote machine. -### Using a floppy for linux kickstart file or preseed +### Using a Floppy for Linux kickstart file or preseed -Once the vm has been started, and the boot process is in place, sometimes a response file is -required. For ESXi enviroment, sometimes is easier use a floopy disk: +Depending on your network configuration, it may be difficult to use packer's +built-in HTTP server with ESXi. Instead, you can provide a kickstart or preseed +file by attaching a floppy disk. An example below, based on RHEL: -ie RedHat: - -``` {.text} - "floppy_files": [ - "folder/ks.cfg" - ], - .. 
- "boot_command": " text ks=floppy " +``` {.javascript} +{ + "builders": [ + { + "type":"vmware-iso", + "floppy_files": [ + "folder/ks.cfg" + ], + "boot_command": " text ks=floppy " + } + ] +} ``` - From 1a775c05d9a603ea5ce1a5afd0d8f17898d82403 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 17 Aug 2015 17:44:01 -0700 Subject: [PATCH 762/956] Update calls to amazon to match the upstream - see http://aws.amazon.com/releasenotes/2948141298714307 - run awsmigrate-renamer on each amazon module (chroot, instance, etc.) --- builder/amazon/chroot/step_attach_volume.go | 10 ++-- builder/amazon/chroot/step_create_volume.go | 16 +++--- builder/amazon/chroot/step_instance_info.go | 2 +- builder/amazon/chroot/step_register_ami.go | 26 +++++----- .../amazon/chroot/step_register_ami_test.go | 14 +++--- builder/amazon/chroot/step_snapshot.go | 8 +-- builder/amazon/common/artifact.go | 2 +- builder/amazon/common/block_device.go | 8 +-- builder/amazon/common/block_device_test.go | 10 ++-- builder/amazon/common/ssh.go | 16 +++--- builder/amazon/common/state.go | 6 +-- builder/amazon/common/step_ami_region_copy.go | 8 +-- builder/amazon/common/step_create_tags.go | 8 +-- builder/amazon/common/step_deregister_ami.go | 4 +- builder/amazon/common/step_get_password.go | 2 +- .../common/step_modify_ami_attributes.go | 6 +-- builder/amazon/common/step_pre_validate.go | 2 +- .../amazon/common/step_run_source_instance.go | 50 +++++++++---------- builder/amazon/common/step_security_group.go | 12 ++--- builder/amazon/common/step_source_ami_info.go | 2 +- builder/amazon/ebs/builder_acc_test.go | 4 +- builder/amazon/ebs/step_cleanup_volumes.go | 10 ++-- builder/amazon/ebs/step_create_ami.go | 12 ++--- builder/amazon/ebs/step_modify_instance.go | 6 +-- builder/amazon/ebs/step_stop_instance.go | 4 +- builder/amazon/ebs/tags_acc_test.go | 8 +-- builder/amazon/instance/step_register_ami.go | 8 +-- 27 files changed, 132 insertions(+), 132 deletions(-) diff --git a/builder/amazon/chroot/step_attach_volume.go b/builder/amazon/chroot/step_attach_volume.go index c450d3b02..486948c27 100644 --- a/builder/amazon/chroot/step_attach_volume.go +++ b/builder/amazon/chroot/step_attach_volume.go @@ -35,8 +35,8 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume)) _, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{ - InstanceID: instance.InstanceID, - VolumeID: &volumeId, + InstanceId: instance.InstanceId, + VolumeId: &volumeId, Device: &attachVolume, }) if err != nil { @@ -58,7 +58,7 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction { Refresh: func() (interface{}, string, error) { attempts := 0 for attempts < 30 { - resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&volumeId}}) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeId}}) if err != nil { return nil, "", err } @@ -107,7 +107,7 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error { ui := state.Get("ui").(packer.Ui) ui.Say("Detaching EBS volume...") - _, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeID: &s.volumeId}) + _, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeId: &s.volumeId}) if err != nil { return fmt.Errorf("Error detaching EBS volume: %s", err) } @@ -120,7 +120,7 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error { StepState: state, Target: "detached", Refresh: func() (interface{}, 
string, error) { - resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&s.volumeId}}) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}}) if err != nil { return nil, "", err } diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index 9db99163a..a79e22c47 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -45,16 +45,16 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } ui.Say("Creating the root volume...") - vs := *rootDevice.EBS.VolumeSize - if s.RootVolumeSize > *rootDevice.EBS.VolumeSize { + vs := *rootDevice.Ebs.VolumeSize + if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize { vs = s.RootVolumeSize } createVolume := &ec2.CreateVolumeInput{ AvailabilityZone: instance.Placement.AvailabilityZone, Size: aws.Int64(vs), - SnapshotID: rootDevice.EBS.SnapshotID, - VolumeType: rootDevice.EBS.VolumeType, - IOPS: rootDevice.EBS.IOPS, + SnapshotId: rootDevice.Ebs.SnapshotId, + VolumeType: rootDevice.Ebs.VolumeType, + Iops: rootDevice.Ebs.Iops, } log.Printf("Create args: %s", createVolume) @@ -67,7 +67,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { } // Set the volume ID so we remember to delete it later - s.volumeId = *createVolumeResp.VolumeID + s.volumeId = *createVolumeResp.VolumeId log.Printf("Volume ID: %s", s.volumeId) // Wait for the volume to become ready @@ -76,7 +76,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "available", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIDs: []*string{&s.volumeId}}) + resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}}) if err != nil { return nil, "", err } @@ -107,7 +107,7 @@ func (s *StepCreateVolume) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Deleting the created EBS volume...") - _, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeID: &s.volumeId}) + _, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: &s.volumeId}) if err != nil { ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err)) } diff --git a/builder/amazon/chroot/step_instance_info.go b/builder/amazon/chroot/step_instance_info.go index b77c9e8a1..ceba307cc 100644 --- a/builder/amazon/chroot/step_instance_info.go +++ b/builder/amazon/chroot/step_instance_info.go @@ -34,7 +34,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction { log.Printf("Instance ID: %s", instanceId) // Query the entire instance metadata - instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIDs: []*string{&instanceId}}) + instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&instanceId}}) if err != nil { err := fmt.Errorf("Error getting instance data: %s", err) state.Put("error", err) diff --git a/builder/amazon/chroot/step_register_ami.go b/builder/amazon/chroot/step_register_ami.go index 8ed4df9b9..f2a59ae01 100644 --- a/builder/amazon/chroot/step_register_ami.go +++ b/builder/amazon/chroot/step_register_ami.go @@ -27,21 +27,21 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { for i, device := range image.BlockDeviceMappings { newDevice := device if *newDevice.DeviceName == 
*image.RootDeviceName { - if newDevice.EBS != nil { - newDevice.EBS.SnapshotID = aws.String(snapshotId) + if newDevice.Ebs != nil { + newDevice.Ebs.SnapshotId = aws.String(snapshotId) } else { - newDevice.EBS = &ec2.EBSBlockDevice{SnapshotID: aws.String(snapshotId)} + newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotId)} } - if s.RootVolumeSize > *newDevice.EBS.VolumeSize { - newDevice.EBS.VolumeSize = aws.Int64(s.RootVolumeSize) + if s.RootVolumeSize > *newDevice.Ebs.VolumeSize { + newDevice.Ebs.VolumeSize = aws.Int64(s.RootVolumeSize) } } // assume working from a snapshot, so we unset the Encrypted field if set, // otherwise AWS API will return InvalidParameter - if newDevice.EBS != nil && newDevice.EBS.Encrypted != nil { - newDevice.EBS.Encrypted = nil + if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil { + newDevice.Ebs.Encrypted = nil } blockDevices[i] = newDevice @@ -51,7 +51,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set SriovNetSupport to "simple". See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { - registerOpts.SRIOVNetSupport = aws.String("simple") + registerOpts.SriovNetSupport = aws.String("simple") } registerResp, err := ec2conn.RegisterImage(registerOpts) @@ -62,16 +62,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) + ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId)) amis := make(map[string]string) - amis[*ec2conn.Config.Region] = *registerResp.ImageID + amis[*ec2conn.Config.Region] = *registerResp.ImageId state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageID), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId), StepState: state, } @@ -102,8 +102,8 @@ func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []*ec2.Blo } if config.AMIVirtType != "hvm" { - registerOpts.KernelID = image.KernelID - registerOpts.RAMDiskID = image.RAMDiskID + registerOpts.KernelId = image.KernelId + registerOpts.RamdiskId = image.RamdiskId } return registerOpts diff --git a/builder/amazon/chroot/step_register_ami_test.go b/builder/amazon/chroot/step_register_ami_test.go index ac473b302..0cc3bc912 100644 --- a/builder/amazon/chroot/step_register_ami_test.go +++ b/builder/amazon/chroot/step_register_ami_test.go @@ -9,10 +9,10 @@ import ( func testImage() ec2.Image { return ec2.Image{ - ImageID: aws.String("ami-abcd1234"), + ImageId: aws.String("ami-abcd1234"), Name: aws.String("ami_test_name"), Architecture: aws.String("x86_64"), - KernelID: aws.String("aki-abcd1234"), + KernelId: aws.String("aki-abcd1234"), } } @@ -38,9 +38,9 @@ func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) { t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name) } - expected = *image.KernelID - if *opts.KernelID != expected { - t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelID) + expected = *image.KernelId + if *opts.KernelId != expected { + t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId) } } @@ -67,7 +67,7 @@ func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) { t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name) } - if opts.KernelID != nil { - t.Fatalf("Unexpected KernelId 
value: expected nil got %s\n", *opts.KernelID) + if opts.KernelId != nil { + t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId) } } diff --git a/builder/amazon/chroot/step_snapshot.go b/builder/amazon/chroot/step_snapshot.go index b98e10861..f003ee66a 100644 --- a/builder/amazon/chroot/step_snapshot.go +++ b/builder/amazon/chroot/step_snapshot.go @@ -28,7 +28,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { description := fmt.Sprintf("Packer: %s", time.Now().String()) createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{ - VolumeID: &volumeId, + VolumeId: &volumeId, Description: &description, }) if err != nil { @@ -39,7 +39,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { } // Set the snapshot ID so we can delete it later - s.snapshotId = *createSnapResp.SnapshotID + s.snapshotId = *createSnapResp.SnapshotId ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId)) // Wait for the snapshot to be ready @@ -48,7 +48,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction { StepState: state, Target: "completed", Refresh: func() (interface{}, string, error) { - resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIDs: []*string{&s.snapshotId}}) + resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIds: []*string{&s.snapshotId}}) if err != nil { return nil, "", err } @@ -86,7 +86,7 @@ func (s *StepSnapshot) Cleanup(state multistep.StateBag) { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) ui.Say("Removing snapshot since we cancelled or halted...") - _, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotID: &s.snapshotId}) + _, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &s.snapshotId}) if err != nil { ui.Error(fmt.Sprintf("Error: %s", err)) } diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 4082b2abc..8eed0134d 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -75,7 +75,7 @@ func (a *Artifact) Destroy() error { regionConn := ec2.New(regionConfig) input := &ec2.DeregisterImageInput{ - ImageID: &imageId, + ImageId: &imageId, } if _, err := regionConn.DeregisterImage(input); err != nil { errors = append(errors, err) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index f009cd7bc..094738869 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -30,7 +30,7 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { var blockDevices []*ec2.BlockDeviceMapping for _, blockDevice := range b { - ebsBlockDevice := &ec2.EBSBlockDevice{ + ebsBlockDevice := &ec2.EbsBlockDevice{ VolumeType: aws.String(blockDevice.VolumeType), VolumeSize: aws.Int64(blockDevice.VolumeSize), DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), @@ -38,12 +38,12 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { // IOPS is only valid for SSD Volumes if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { - ebsBlockDevice.IOPS = aws.Int64(blockDevice.IOPS) + ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS) } // You cannot specify Encrypted if you specify a Snapshot ID if blockDevice.SnapshotId != "" { - ebsBlockDevice.SnapshotID = aws.String(blockDevice.SnapshotId) + ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId) } else if 
blockDevice.Encrypted { ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted) } @@ -54,7 +54,7 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { } if !strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { - mapping.EBS = ebsBlockDevice + mapping.Ebs = ebsBlockDevice } if blockDevice.NoDevice { diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index d76cf4d07..99514009b 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -25,8 +25,8 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String(""), - EBS: &ec2.EBSBlockDevice{ - SnapshotID: aws.String("snap-1234"), + Ebs: &ec2.EbsBlockDevice{ + SnapshotId: aws.String("snap-1234"), VolumeType: aws.String("standard"), VolumeSize: aws.Int64(8), DeleteOnTermination: aws.Bool(true), @@ -42,7 +42,7 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String(""), - EBS: &ec2.EBSBlockDevice{ + Ebs: &ec2.EbsBlockDevice{ VolumeType: aws.String(""), VolumeSize: aws.Int64(8), DeleteOnTermination: aws.Bool(false), @@ -61,11 +61,11 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), VirtualName: aws.String(""), - EBS: &ec2.EBSBlockDevice{ + Ebs: &ec2.EbsBlockDevice{ VolumeType: aws.String("io1"), VolumeSize: aws.Int64(8), DeleteOnTermination: aws.Bool(true), - IOPS: aws.Int64(1000), + Iops: aws.Int64(1000), }, }, }, diff --git a/builder/amazon/common/ssh.go b/builder/amazon/common/ssh.go index cf644eb25..d689d5990 100644 --- a/builder/amazon/common/ssh.go +++ b/builder/amazon/common/ssh.go @@ -17,14 +17,14 @@ func SSHHost(e *ec2.EC2, private bool) func(multistep.StateBag) (string, error) for j := 0; j < 2; j++ { var host string i := state.Get("instance").(*ec2.Instance) - if i.VPCID != nil && *i.VPCID != "" { - if i.PublicIPAddress != nil && *i.PublicIPAddress != "" && !private { - host = *i.PublicIPAddress + if i.VpcId != nil && *i.VpcId != "" { + if i.PublicIpAddress != nil && *i.PublicIpAddress != "" && !private { + host = *i.PublicIpAddress } else { - host = *i.PrivateIPAddress + host = *i.PrivateIpAddress } - } else if i.PublicDNSName != nil && *i.PublicDNSName != "" { - host = *i.PublicDNSName + } else if i.PublicDnsName != nil && *i.PublicDnsName != "" { + host = *i.PublicDnsName } if host != "" { @@ -32,14 +32,14 @@ func SSHHost(e *ec2.EC2, private bool) func(multistep.StateBag) (string, error) } r, err := e.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIDs: []*string{i.InstanceID}, + InstanceIds: []*string{i.InstanceId}, }) if err != nil { return "", err } if len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 { - return "", fmt.Errorf("instance not found: %s", *i.InstanceID) + return "", fmt.Errorf("instance not found: %s", *i.InstanceId) } state.Put("instance", &r.Reservations[0].Instances[0]) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 3b40a48d1..6c9de3eb2 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -39,7 +39,7 @@ type StateChangeConf struct { func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeImages(&ec2.DescribeImagesInput{ - ImageIDs: []*string{&imageId}, + ImageIds: []*string{&imageId}, }) if err != nil { if 
ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { @@ -70,7 +70,7 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc { func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIDs: []*string{&instanceId}, + InstanceIds: []*string{&instanceId}, }) if err != nil { if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { @@ -101,7 +101,7 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIDs: []*string{&spotRequestId}, + SpotInstanceRequestIds: []*string{&spotRequestId}, }) if err != nil { diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index d19ffe5bd..0cf4d40fa 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -90,7 +90,7 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, regionconn := ec2.New(awsConfig) resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ SourceRegion: &source, - SourceImageID: &imageId, + SourceImageId: &imageId, Name: &name, }) @@ -102,14 +102,14 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, stateChange := StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: AMIStateRefreshFunc(regionconn, *resp.ImageID), + Refresh: AMIStateRefreshFunc(regionconn, *resp.ImageId), StepState: state, } if _, err := WaitForState(&stateChange); err != nil { return "", fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s", - *resp.ImageID, target, err) + *resp.ImageId, target, err) } - return *resp.ImageID, nil + return *resp.ImageId, nil } diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 220735bed..7f62d2657 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -41,7 +41,7 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { // Retrieve image list for given AMI imageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{ - ImageIDs: resourceIds, + ImageIds: resourceIds, }) if err != nil { @@ -62,9 +62,9 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { // Add only those with a Snapshot ID, i.e. 
not Ephemeral for _, device := range image.BlockDeviceMappings { - if device.EBS != nil && device.EBS.SnapshotID != nil { - ui.Say(fmt.Sprintf("Tagging snapshot: %s", *device.EBS.SnapshotID)) - resourceIds = append(resourceIds, device.EBS.SnapshotID) + if device.Ebs != nil && device.Ebs.SnapshotId != nil { + ui.Say(fmt.Sprintf("Tagging snapshot: %s", *device.Ebs.SnapshotId)) + resourceIds = append(resourceIds, device.Ebs.SnapshotId) } } diff --git a/builder/amazon/common/step_deregister_ami.go b/builder/amazon/common/step_deregister_ami.go index ce20a5d90..1ea4325cd 100644 --- a/builder/amazon/common/step_deregister_ami.go +++ b/builder/amazon/common/step_deregister_ami.go @@ -36,7 +36,7 @@ func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction { // deregister image(s) by that name for _, i := range resp.Images { _, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{ - ImageID: i.ImageID, + ImageId: i.ImageId, }) if err != nil { @@ -45,7 +45,7 @@ func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction { ui.Error(err.Error()) return multistep.ActionHalt } - ui.Say(fmt.Sprintf("Deregistered AMI %s, id: %s", s.AMIName, *i.ImageID)) + ui.Say(fmt.Sprintf("Deregistered AMI %s, id: %s", s.AMIName, *i.ImageId)) } } diff --git a/builder/amazon/common/step_get_password.go b/builder/amazon/common/step_get_password.go index fec33891f..618e3bbb8 100644 --- a/builder/amazon/common/step_get_password.go +++ b/builder/amazon/common/step_get_password.go @@ -111,7 +111,7 @@ func (s *StepGetPassword) waitForPassword(state multistep.StateBag, cancel <-cha } resp, err := ec2conn.GetPasswordData(&ec2.GetPasswordDataInput{ - InstanceID: instance.InstanceID, + InstanceId: instance.InstanceId, }) if err != nil { err := fmt.Errorf("Error retrieving auto-generated instance password: %s", err) diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index df6424245..e8e7de589 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -66,10 +66,10 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc adds := make([]*ec2.LaunchPermission, len(s.Users)) for i, u := range s.Users { users[i] = aws.String(u) - adds[i] = &ec2.LaunchPermission{UserID: aws.String(u)} + adds[i] = &ec2.LaunchPermission{UserId: aws.String(u)} } options["users"] = &ec2.ModifyImageAttributeInput{ - UserIDs: users, + UserIds: users, LaunchPermission: &ec2.LaunchPermissionModifications{ Add: adds, }, @@ -94,7 +94,7 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc }) for name, input := range options { ui.Message(fmt.Sprintf("Modifying: %s", name)) - input.ImageID = &ami + input.ImageId = &ami _, err := regionconn.ModifyImageAttribute(input) if err != nil { err := fmt.Errorf("Error modify AMI attributes: %s", err) diff --git a/builder/amazon/common/step_pre_validate.go b/builder/amazon/common/step_pre_validate.go index bbeacea43..86bb8e23d 100644 --- a/builder/amazon/common/step_pre_validate.go +++ b/builder/amazon/common/step_pre_validate.go @@ -41,7 +41,7 @@ func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction { } if len(resp.Images) > 0 { - err := fmt.Errorf("Error: name conflicts with an existing AMI: %s", *resp.Images[0].ImageID) + err := fmt.Errorf("Error: name conflicts with an existing AMI: %s", *resp.Images[0].ImageId) state.Put("error", err) ui.Error(err.Error()) return 
multistep.ActionHalt diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index fcacc4ca8..be4120c19 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -66,7 +66,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ui.Say("Launching a source AWS instance...") imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ - ImageIDs: []*string{&s.SourceAMI}, + ImageIds: []*string{&s.SourceAMI}, }) if err != nil { state.Put("error", fmt.Errorf("There was a problem with the source AMI: %s", err)) @@ -138,12 +138,12 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi if spotPrice == "" { runOpts := &ec2.RunInstancesInput{ KeyName: &keyName, - ImageID: &s.SourceAMI, + ImageId: &s.SourceAMI, InstanceType: &s.InstanceType, UserData: &userData, MaxCount: aws.Int64(1), MinCount: aws.Int64(1), - IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile}, BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone}, } @@ -152,15 +152,15 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ &ec2.InstanceNetworkInterfaceSpecification{ DeviceIndex: aws.Int64(0), - AssociatePublicIPAddress: &s.AssociatePublicIpAddress, - SubnetID: &s.SubnetId, + AssociatePublicIpAddress: &s.AssociatePublicIpAddress, + SubnetId: &s.SubnetId, Groups: securityGroupIds, DeleteOnTermination: aws.Bool(true), }, } } else { - runOpts.SubnetID = &s.SubnetId - runOpts.SecurityGroupIDs = securityGroupIds + runOpts.SubnetId = &s.SubnetId + runOpts.SecurityGroupIds = securityGroupIds } runResp, err := ec2conn.RunInstances(runOpts) @@ -170,7 +170,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ui.Error(err.Error()) return multistep.ActionHalt } - instanceId = *runResp.Instances[0].InstanceID + instanceId = *runResp.Instances[0].InstanceId } else { ui.Message(fmt.Sprintf( "Requesting spot instance '%s' for: %s", @@ -179,15 +179,15 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi SpotPrice: &spotPrice, LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ KeyName: &keyName, - ImageID: &s.SourceAMI, + ImageId: &s.SourceAMI, InstanceType: &s.InstanceType, UserData: &userData, - IAMInstanceProfile: &ec2.IAMInstanceProfileSpecification{Name: &s.IamInstanceProfile}, + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile}, NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ &ec2.InstanceNetworkInterfaceSpecification{ DeviceIndex: aws.Int64(0), - AssociatePublicIPAddress: &s.AssociatePublicIpAddress, - SubnetID: &s.SubnetId, + AssociatePublicIpAddress: &s.AssociatePublicIpAddress, + SubnetId: &s.SubnetId, Groups: securityGroupIds, DeleteOnTermination: aws.Bool(true), }, @@ -207,7 +207,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi s.spotRequest = runSpotResp.SpotInstanceRequests[0] - spotRequestId := s.spotRequest.SpotInstanceRequestID + spotRequestId := s.spotRequest.SpotInstanceRequestId ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", *spotRequestId)) stateChange := 
StateChangeConf{ Pending: []string{"open"}, @@ -224,7 +224,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } spotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIDs: []*string{spotRequestId}, + SpotInstanceRequestIds: []*string{spotRequestId}, }) if err != nil { err := fmt.Errorf("Error finding spot request (%s): %s", *spotRequestId, err) @@ -232,7 +232,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi ui.Error(err.Error()) return multistep.ActionHalt } - instanceId = *spotResp.SpotInstanceRequests[0].InstanceID + instanceId = *spotResp.SpotInstanceRequests[0].InstanceId } // Set the instance ID so that the cleanup works properly @@ -264,7 +264,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ Tags: ec2Tags, - Resources: []*string{instance.InstanceID}, + Resources: []*string{instance.InstanceId}, }) if err != nil { ui.Message( @@ -272,16 +272,16 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } if s.Debug { - if instance.PublicDNSName != nil && *instance.PublicDNSName != "" { - ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDNSName)) + if instance.PublicDnsName != nil && *instance.PublicDnsName != "" { + ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDnsName)) } - if instance.PublicIPAddress != nil && *instance.PublicIPAddress != "" { - ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIPAddress)) + if instance.PublicIpAddress != nil && *instance.PublicIpAddress != "" { + ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIpAddress)) } - if instance.PrivateIPAddress != nil && *instance.PrivateIPAddress != "" { - ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIPAddress)) + if instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != "" { + ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIpAddress)) } } @@ -299,7 +299,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { if s.spotRequest != nil { ui.Say("Cancelling the spot request...") input := &ec2.CancelSpotInstanceRequestsInput{ - SpotInstanceRequestIDs: []*string{s.spotRequest.SpotInstanceRequestID}, + SpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId}, } if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil { ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err)) @@ -307,7 +307,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { } stateChange := StateChangeConf{ Pending: []string{"active", "open"}, - Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestID), + Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId), Target: "cancelled", } @@ -318,7 +318,7 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { // Terminate the source instance if it exists if s.instanceId != "" { ui.Say("Terminating the source AWS instance...") - if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIDs: []*string{&s.instanceId}}); err != nil { + if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil { ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err)) return } diff --git a/builder/amazon/common/step_security_group.go 
b/builder/amazon/common/step_security_group.go index e43e866a3..43b9fd4c7 100644 --- a/builder/amazon/common/step_security_group.go +++ b/builder/amazon/common/step_security_group.go @@ -43,7 +43,7 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { group := &ec2.CreateSecurityGroupInput{ GroupName: &groupName, Description: aws.String("Temporary group for Packer"), - VPCID: &s.VpcId, + VpcId: &s.VpcId, } groupResp, err := ec2conn.CreateSecurityGroup(group) if err != nil { @@ -53,15 +53,15 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction { } // Set the group ID so we can delete it later - s.createdGroupId = *groupResp.GroupID + s.createdGroupId = *groupResp.GroupId // Authorize the SSH access for the security group req := &ec2.AuthorizeSecurityGroupIngressInput{ - GroupID: groupResp.GroupID, - IPProtocol: aws.String("tcp"), + GroupId: groupResp.GroupId, + IpProtocol: aws.String("tcp"), FromPort: aws.Int64(int64(port)), ToPort: aws.Int64(int64(port)), - CIDRIP: aws.String("0.0.0.0/0"), + CidrIp: aws.String("0.0.0.0/0"), } // We loop and retry this a few times because sometimes the security @@ -105,7 +105,7 @@ func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) { var err error for i := 0; i < 5; i++ { - _, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupID: &s.createdGroupId}) + _, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupId: &s.createdGroupId}) if err == nil { break } diff --git a/builder/amazon/common/step_source_ami_info.go b/builder/amazon/common/step_source_ami_info.go index 5ab36e5da..21fd0db8e 100644 --- a/builder/amazon/common/step_source_ami_info.go +++ b/builder/amazon/common/step_source_ami_info.go @@ -23,7 +23,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Inspecting the source AMI...") - imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIDs: []*string{&s.SourceAmi}}) + imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{&s.SourceAmi}}) if err != nil { err := fmt.Errorf("Error querying AMI: %s", err) state.Put("error", err) diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index 879f7a732..890b3228a 100644 --- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -71,7 +71,7 @@ func checkAMISharing(count int, uid, group string) builderT.TestCheckFunc { ec2conn, _ := testEC2Conn() imageResp, err := ec2conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{ Attribute: aws.String("launchPermission"), - ImageID: aws.String(artifact.Amis["us-east-1"]), + ImageId: aws.String(artifact.Amis["us-east-1"]), }) if err != nil { @@ -86,7 +86,7 @@ func checkAMISharing(count int, uid, group string) builderT.TestCheckFunc { userFound := false for _, lp := range imageResp.LaunchPermissions { - if lp.UserID != nil && uid == *lp.UserID { + if lp.UserId != nil && uid == *lp.UserId { userFound = true } } diff --git a/builder/amazon/ebs/step_cleanup_volumes.go b/builder/amazon/ebs/step_cleanup_volumes.go index 56ebe5527..461bb7b9d 100644 --- a/builder/amazon/ebs/step_cleanup_volumes.go +++ b/builder/amazon/ebs/step_cleanup_volumes.go @@ -62,9 +62,9 @@ func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) { var vl []*string volList := make(map[string]string) for _, bdm := range instance.BlockDeviceMappings { - if bdm.EBS != nil { - vl = append(vl, 
bdm.EBS.VolumeID) - volList[*bdm.EBS.VolumeID] = *bdm.DeviceName + if bdm.Ebs != nil { + vl = append(vl, bdm.Ebs.VolumeId) + volList[*bdm.Ebs.VolumeId] = *bdm.DeviceName } } @@ -88,7 +88,7 @@ func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) { // available, remove them from the list of volumes for _, v := range resp.Volumes { if v.State != nil && *v.State != "available" { - delete(volList, *v.VolumeID) + delete(volList, *v.VolumeId) } } @@ -109,7 +109,7 @@ func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) { // Destroy remaining volumes for k, _ := range volList { ui.Say(fmt.Sprintf("Destroying volume (%s)...", k)) - _, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeID: aws.String(k)}) + _, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: aws.String(k)}) if err != nil { ui.Say(fmt.Sprintf("Error deleting volume: %s", k)) } diff --git a/builder/amazon/ebs/step_create_ami.go b/builder/amazon/ebs/step_create_ami.go index a3980e3ee..e3e7e7026 100644 --- a/builder/amazon/ebs/step_create_ami.go +++ b/builder/amazon/ebs/step_create_ami.go @@ -22,7 +22,7 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { // Create the image ui.Say(fmt.Sprintf("Creating the AMI: %s", config.AMIName)) createOpts := &ec2.CreateImageInput{ - InstanceID: instance.InstanceID, + InstanceId: instance.InstanceId, Name: &config.AMIName, BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(), } @@ -36,16 +36,16 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageID)) + ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageId)) amis := make(map[string]string) - amis[*ec2conn.Config.Region] = *createResp.ImageID + amis[*ec2conn.Config.Region] = *createResp.ImageId state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *createResp.ImageID), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *createResp.ImageId), StepState: state, } @@ -57,7 +57,7 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIDs: []*string{createResp.ImageID}}) + imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{createResp.ImageId}}) if err != nil { err := fmt.Errorf("Error searching for AMI: %s", err) state.Put("error", err) @@ -84,7 +84,7 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) ui.Say("Deregistering the AMI because cancelation or error...") - deregisterOpts := &ec2.DeregisterImageInput{ImageID: s.image.ImageID} + deregisterOpts := &ec2.DeregisterImageInput{ImageId: s.image.ImageId} if _, err := ec2conn.DeregisterImage(deregisterOpts); err != nil { ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err)) return diff --git a/builder/amazon/ebs/step_modify_instance.go b/builder/amazon/ebs/step_modify_instance.go index d7b30e42c..b9976db6d 100644 --- a/builder/amazon/ebs/step_modify_instance.go +++ b/builder/amazon/ebs/step_modify_instance.go @@ -21,11 +21,11 @@ func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction ui.Say("Enabling Enhanced Networking...") simple := "simple" _, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ - 
InstanceID: instance.InstanceID, - SRIOVNetSupport: &ec2.AttributeValue{Value: &simple}, + InstanceId: instance.InstanceId, + SriovNetSupport: &ec2.AttributeValue{Value: &simple}, }) if err != nil { - err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", *instance.InstanceID, err) + err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", *instance.InstanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go index 967e5bbf4..77bcd2d7b 100644 --- a/builder/amazon/ebs/step_stop_instance.go +++ b/builder/amazon/ebs/step_stop_instance.go @@ -26,7 +26,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { // Stop the instance so we can create an AMI from it ui.Say("Stopping the source instance...") _, err := ec2conn.StopInstances(&ec2.StopInstancesInput{ - InstanceIDs: []*string{instance.InstanceID}, + InstanceIds: []*string{instance.InstanceId}, }) if err != nil { err := fmt.Errorf("Error stopping instance: %s", err) @@ -40,7 +40,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { stateChange := awscommon.StateChangeConf{ Pending: []string{"running", "stopping"}, Target: "stopped", - Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, *instance.InstanceID), + Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, *instance.InstanceId), StepState: state, } _, err = awscommon.WaitForState(&stateChange) diff --git a/builder/amazon/ebs/tags_acc_test.go b/builder/amazon/ebs/tags_acc_test.go index 606bb89ee..3027eff7f 100644 --- a/builder/amazon/ebs/tags_acc_test.go +++ b/builder/amazon/ebs/tags_acc_test.go @@ -40,7 +40,7 @@ func checkTags() builderT.TestCheckFunc { // describe the image, get block devices with a snapshot ec2conn, _ := testEC2Conn() imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ - ImageIDs: []*string{aws.String(artifact.Amis["us-east-1"])}, + ImageIds: []*string{aws.String(artifact.Amis["us-east-1"])}, }) if err != nil { @@ -56,14 +56,14 @@ func checkTags() builderT.TestCheckFunc { // Check only those with a Snapshot ID, i.e. not Ephemeral var snapshots []*string for _, device := range image.BlockDeviceMappings { - if device.EBS != nil && device.EBS.SnapshotID != nil { - snapshots = append(snapshots, device.EBS.SnapshotID) + if device.Ebs != nil && device.Ebs.SnapshotId != nil { + snapshots = append(snapshots, device.Ebs.SnapshotId) } } // grab matching snapshot info resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{ - SnapshotIDs: snapshots, + SnapshotIds: snapshots, }) if err != nil { diff --git a/builder/amazon/instance/step_register_ami.go b/builder/amazon/instance/step_register_ami.go index dc76331f8..fe6f785b8 100644 --- a/builder/amazon/instance/step_register_ami.go +++ b/builder/amazon/instance/step_register_ami.go @@ -31,7 +31,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { // Set SriovNetSupport to "simple". 
See http://goo.gl/icuXh5 if config.AMIEnhancedNetworking { - registerOpts.SRIOVNetSupport = aws.String("simple") + registerOpts.SriovNetSupport = aws.String("simple") } registerResp, err := ec2conn.RegisterImage(registerOpts) @@ -42,16 +42,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction { } // Set the AMI ID in the state - ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageID)) + ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId)) amis := make(map[string]string) - amis[*ec2conn.Config.Region] = *registerResp.ImageID + amis[*ec2conn.Config.Region] = *registerResp.ImageId state.Put("amis", amis) // Wait for the image to become ready stateChange := awscommon.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageID), + Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId), StepState: state, } From f0fdf865a19790530853e9b15ef6b149e37ebc81 Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 18 Aug 2015 10:53:33 +0000 Subject: [PATCH 763/956] allow wildcards in artifice files Signed-off-by: Vasiliy Tolstov --- post-processor/artifice/artifact.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/post-processor/artifice/artifact.go b/post-processor/artifice/artifact.go index cb344b8e2..de55e46e6 100644 --- a/post-processor/artifice/artifact.go +++ b/post-processor/artifice/artifact.go @@ -3,6 +3,7 @@ package artifice import ( "fmt" "os" + "path/filepath" "strings" ) @@ -13,13 +14,18 @@ type Artifact struct { } func NewArtifact(files []string) (*Artifact, error) { + artifact := &Artifact{} for _, f := range files { - if _, err := os.Stat(f); err != nil { + globfiles, err := filepath.Glob(f) + if err != nil { return nil, err } - } - artifact := &Artifact{ - files: files, + for _, gf := range globfiles { + if _, err := os.Stat(gf); err != nil { + return nil, err + } + artifact.files = append(artifact.files, gf) + } } return artifact, nil } From 32978a5109ae348de759b6dfab055567b69921b1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 13:48:18 -0700 Subject: [PATCH 764/956] Add an explicit error message when there is no output file specified --- builder/docker/step_export.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/builder/docker/step_export.go b/builder/docker/step_export.go index aa949b610..6fbdcb63d 100644 --- a/builder/docker/step_export.go +++ b/builder/docker/step_export.go @@ -2,9 +2,10 @@ package docker import ( "fmt" + "os" + "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" - "os" ) // StepExport exports the container to a flat tar file. 
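StepExport is roughly the programmatic equivalent of exporting a container by hand with the Docker CLI; for example (the container ID here is hypothetical):

``` {.text}
$ docker export f2c8a882a247 > container.tar
```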
@@ -17,6 +18,14 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { containerId := state.Get("container_id").(string) ui := state.Get("ui").(packer.Ui) + // We should catch this in validation, but guard anyway + if config.ExportPath == "" { + err := fmt.Errorf("No output file specified, we can't export anything") + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + // Open the file that we're going to write to f, err := os.Create(config.ExportPath) if err != nil { From 750a9c61de5133f627f6ea5051479f1eecf4452f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 14:38:32 -0700 Subject: [PATCH 765/956] Added discard option for docker builder, also reorganized some error messages --- builder/docker/builder.go | 16 ++++++++++++---- builder/docker/config.go | 28 ++++++++++++++++++++-------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 89880aacc..702d530ce 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -9,8 +9,10 @@ import ( "github.com/mitchellh/packer/packer" ) -const BuilderId = "packer.docker" -const BuilderIdImport = "packer.post-processor.docker-import" +const ( + BuilderId = "packer.docker" + BuilderIdImport = "packer.post-processor.docker-import" +) type Builder struct { config *Config @@ -54,10 +56,16 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &common.StepProvision{}, } - if b.config.Commit { + if b.config.Discard { + log.Print("[DEBUG] Container will be discarded") + } else if b.config.Commit { + log.Print("[DEBUG] Container will be committed") steps = append(steps, new(StepCommit)) - } else { + } else if b.config.ExportPath != "" { + log.Printf("[DEBUG] Container will be exported to %s", b.config.ExportPath) steps = append(steps, new(StepExport)) + } else { + return nil, ErrArtifactNotUsed } // Setup the state bag and initial state for the steps diff --git a/builder/docker/config.go b/builder/docker/config.go index 36322080c..dc205eaf0 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -12,23 +12,33 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) +var ( + ErrArtifactNotUsed = fmt.Errorf("No instructions given for handling the artifact; expected commit, discard, or export_path") + ErrArtifactUseConflict = fmt.Errorf("Cannot specify more than one of commit, discard, and export_path") + ErrExportPathNotFile = fmt.Errorf("export_path must be a file, not a directory") + ErrImageNotSpecified = fmt.Errorf("Image must be specified") +) + type Config struct { common.PackerConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` Commit bool + Discard bool ExportPath string `mapstructure:"export_path"` Image string + Pty bool Pull bool RunCommand []string `mapstructure:"run_command"` Volumes map[string]string + // This is used to login to dockerhub to pull a private base container + // For pushing to dockerhub, see the docker post-processors Login bool LoginEmail string `mapstructure:"login_email"` - LoginUsername string `mapstructure:"login_username"` LoginPassword string `mapstructure:"login_password"` LoginServer string `mapstructure:"login_server"` - Pty bool + LoginUsername string `mapstructure:"login_username"` ctx interpolate.Context } @@ -84,18 +94,20 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { } if c.Image == "" { errs = packer.MultiErrorAppend(errs, - fmt.Errorf("image must be 
specified")) + ErrImageNotSpecified) } - if c.ExportPath != "" && c.Commit { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("both commit and export_path cannot be set")) + if (c.ExportPath != "" && c.Commit) || (c.ExportPath != "" && c.Discard) || (c.Commit && c.Discard) { + errs = packer.MultiErrorAppend(errs, ErrArtifactUseConflict) + } + + if c.ExportPath == "" && !c.Commit && !c.Discard { + errs = packer.MultiErrorAppend(errs, ErrArtifactNotUsed) } if c.ExportPath != "" { if fi, err := os.Stat(c.ExportPath); err == nil && fi.IsDir() { - errs = packer.MultiErrorAppend(errs, fmt.Errorf( - "export_path must be a file, not a directory")) + errs = packer.MultiErrorAppend(errs, ErrExportPathNotFile) } } From 1b1bd19c20e8e6daa4fd3eb2a2802a44efe15a25 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 16:47:12 -0700 Subject: [PATCH 766/956] Reformat code so we can grep for this more easily --- builder/docker/config.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/builder/docker/config.go b/builder/docker/config.go index dc205eaf0..7c3565008 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -63,11 +63,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { // Defaults if len(c.RunCommand) == 0 { - c.RunCommand = []string{ - "-d", "-i", "-t", - "{{.Image}}", - "/bin/bash", - } + c.RunCommand = []string{"-d", "-i", "-t", "{{.Image}}", "/bin/bash"} } // Default Pull if it wasn't set From ffef8efaf4fa20f5d183e9be7cf2cc68846bc2e4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 16:47:58 -0700 Subject: [PATCH 767/956] Added docs for discard; clarify mutual exclusivity between commit, discard, and export_path --- website/source/docs/builders/docker.html.markdown | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/docker.html.markdown b/website/source/docs/builders/docker.html.markdown index 76b1d4057..48489380b 100644 --- a/website/source/docs/builders/docker.html.markdown +++ b/website/source/docs/builders/docker.html.markdown @@ -68,11 +68,17 @@ builder. ### Required: +You must specify (only) one of `commit`, `discard`, or `export_path`. + - `commit` (boolean) - If true, the container will be committed to an image - rather than exported. This cannot be set if `export_path` is set. + rather than exported. + +- `discard` (boolean) - Throw away the container when the build is complete. + This is useful for the [artifice + post-processor](https://packer.io/docs/post-processors/artifice.html). - `export_path` (string) - The path where the final container will be exported - as a tar file. This cannot be set if `commit` is set to true. + as a tar file. - `image` (string) - The base image for the Docker container that will be started. 
This image will be pulled from the Docker registry if it doesn't

From 746b9a839198ea18690fbccd0e289305bbc2a85a Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Tue, 18 Aug 2015 16:55:29 -0700
Subject: [PATCH 768/956] Formatting tweaks

---
 builder/docker/config.go | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/builder/docker/config.go b/builder/docker/config.go
index 7c3565008..c45f6baf5 100644
--- a/builder/docker/config.go
+++ b/builder/docker/config.go
@@ -32,8 +32,8 @@ type Config struct {
 	RunCommand []string          `mapstructure:"run_command"`
 	Volumes    map[string]string
 
-	// This is used to login to dockerhub to pull a private base container
-	// For pushing to dockerhub, see the docker post-processors
+	// This is used to login to dockerhub to pull a private base container. For
+	// pushing to dockerhub, see the docker post-processors
 	Login         bool
 	LoginEmail    string `mapstructure:"login_email"`
 	LoginPassword string `mapstructure:"login_password"`
@@ -89,8 +89,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
 		errs = packer.MultiErrorAppend(errs, es...)
 	}
 	if c.Image == "" {
-		errs = packer.MultiErrorAppend(errs,
-			ErrImageNotSpecified)
+		errs = packer.MultiErrorAppend(errs, ErrImageNotSpecified)
 	}
 
 	if (c.ExportPath != "" && c.Commit) || (c.ExportPath != "" && c.Discard) || (c.Commit && c.Discard) {

From d367b8b521bbfaaca92dc4220afeea3c0d240732 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Tue, 18 Aug 2015 17:43:59 -0700
Subject: [PATCH 769/956] If vm_name is customized, no file extension is
 added. The documentation has been updated to reflect this.

---
 website/source/docs/builders/qemu.html.markdown | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown
index 651c69122..ab3935329 100644
--- a/website/source/docs/builders/qemu.html.markdown
+++ b/website/source/docs/builders/qemu.html.markdown
@@ -247,8 +247,10 @@ builder and not otherwise conflicting with the qemuargs):
     host port.
 
 -   `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for
-    the new virtual machine, without the file extension. By default this is
-    "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+    the new virtual machine. By default this is "packer-BUILDNAME.FORMAT", where
+    `BUILDNAME` is the name of the build and `FORMAT` matches the `format`
+    option, above. If you customize this you must specify the file extension or
+    none will be used.
 
 -   `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
     to use for the VNC port on the host machine which is forwarded to the VNC

From 55fadd96bccc3d762ca93cbc4d12b27f6117e196 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Tue, 18 Aug 2015 17:44:17 -0700
Subject: [PATCH 770/956] By default, Qemu will now have an extension added
 based on the file format. This is a change to more closely match behavior
 from 0.7.5.
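The defaulting this commit describes amounts to composing the build name with the disk format. A minimal standalone sketch of the behavior, not code from the patch itself (the `defaultVMName` helper is hypothetical, for illustration; the real change lives in `Prepare` in the diff below):

```go
package main

import "fmt"

// defaultVMName mirrors the defaulting described above: when vm_name is
// not set, the image name picks up the disk format as its extension.
func defaultVMName(buildName, format string) string {
	return fmt.Sprintf("packer-%s.%s", buildName, format)
}

func main() {
	fmt.Println(defaultVMName("foo", "qcow2")) // packer-foo.qcow2
	fmt.Println(defaultVMName("foo", "raw"))   // packer-foo.raw
}
```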
--- builder/qemu/builder.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 4ac22b59b..39984d68b 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -201,14 +201,15 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.VNCPortMax = 6000 } - if b.config.VMName == "" { - b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) - } - if b.config.Format == "" { b.config.Format = "qcow2" } + if b.config.VMName == "" { + b.config.VMName = fmt.Sprintf("packer-%s.%s", + b.config.PackerBuildName, b.config.Format) + } + if b.config.FloppyFiles == nil { b.config.FloppyFiles = make([]string, 0) } From 8d8e320e8c0694f7e1dc9ee53cd08dce9428ece9 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 19 Aug 2015 13:01:55 -0700 Subject: [PATCH 771/956] Added docs for checkpoint --- .../source/docs/other/environmental-variables.html.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/source/docs/other/environmental-variables.html.markdown b/website/source/docs/other/environmental-variables.html.markdown index 8827ea5d9..ea18747b7 100644 --- a/website/source/docs/other/environmental-variables.html.markdown +++ b/website/source/docs/other/environmental-variables.html.markdown @@ -34,3 +34,8 @@ each can be found below: communication with plugins, since plugin communication happens over TCP connections on your local host. The default is 10,000. See the [core configuration page](/docs/other/core-configuration.html). + +- `CHECKPOINT_DISABLE` - When Packer is invoked it sometimes calls out to + [checkpoint.hashicorp.com](https://checkpoint.hashicorp.com/) to look for + new versions of Packer. If you want to disable this for security or privacy + reasons, you can set this environment variable to `1`. 
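For a concrete picture of how such a kill switch can work, here is a hedged standalone sketch — not Packer's actual implementation — in which any non-empty value of `CHECKPOINT_DISABLE` skips the version lookup (the docs above suggest setting it to `1`; treating any non-empty value as "disabled" is an assumption of this sketch):

```go
package main

import (
	"fmt"
	"os"
)

// checkpointDisabled reports whether the user opted out of the version
// check via the environment variable documented above.
func checkpointDisabled() bool {
	return os.Getenv("CHECKPOINT_DISABLE") != ""
}

func main() {
	if checkpointDisabled() {
		fmt.Println("checkpoint disabled; not calling home")
		return
	}
	fmt.Println("would query checkpoint.hashicorp.com for new versions")
}
```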
From 5503b7f496c3e323bf0d8b9ebcc435a0072ec136 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 19 Aug 2015 13:12:16 -0700 Subject: [PATCH 772/956] Don't export errors --- builder/docker/builder.go | 2 +- builder/docker/config.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/builder/docker/builder.go b/builder/docker/builder.go index 702d530ce..cfc5bf423 100644 --- a/builder/docker/builder.go +++ b/builder/docker/builder.go @@ -65,7 +65,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe log.Printf("[DEBUG] Container will be exported to %s", b.config.ExportPath) steps = append(steps, new(StepExport)) } else { - return nil, ErrArtifactNotUsed + return nil, errArtifactNotUsed } // Setup the state bag and initial state for the steps diff --git a/builder/docker/config.go b/builder/docker/config.go index c45f6baf5..ad8a5634b 100644 --- a/builder/docker/config.go +++ b/builder/docker/config.go @@ -13,10 +13,10 @@ import ( ) var ( - ErrArtifactNotUsed = fmt.Errorf("No instructions given for handling the artifact; expected commit, discard, or export_path") - ErrArtifactUseConflict = fmt.Errorf("Cannot specify more than one of commit, discard, and export_path") - ErrExportPathNotFile = fmt.Errorf("export_path must be a file, not a directory") - ErrImageNotSpecified = fmt.Errorf("Image must be specified") + errArtifactNotUsed = fmt.Errorf("No instructions given for handling the artifact; expected commit, discard, or export_path") + errArtifactUseConflict = fmt.Errorf("Cannot specify more than one of commit, discard, and export_path") + errExportPathNotFile = fmt.Errorf("export_path must be a file, not a directory") + errImageNotSpecified = fmt.Errorf("Image must be specified") ) type Config struct { @@ -89,20 +89,20 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { errs = packer.MultiErrorAppend(errs, es...) } if c.Image == "" { - errs = packer.MultiErrorAppend(errs, ErrImageNotSpecified) + errs = packer.MultiErrorAppend(errs, errImageNotSpecified) } if (c.ExportPath != "" && c.Commit) || (c.ExportPath != "" && c.Discard) || (c.Commit && c.Discard) { - errs = packer.MultiErrorAppend(errs, ErrArtifactUseConflict) + errs = packer.MultiErrorAppend(errs, errArtifactUseConflict) } if c.ExportPath == "" && !c.Commit && !c.Discard { - errs = packer.MultiErrorAppend(errs, ErrArtifactNotUsed) + errs = packer.MultiErrorAppend(errs, errArtifactNotUsed) } if c.ExportPath != "" { if fi, err := os.Stat(c.ExportPath); err == nil && fi.IsDir() { - errs = packer.MultiErrorAppend(errs, ErrExportPathNotFile) + errs = packer.MultiErrorAppend(errs, errExportPathNotFile) } } From 1764238c0b3c9aa8ad42268890e4acc3cc1670d0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 19 Aug 2015 13:15:23 -0700 Subject: [PATCH 773/956] Added [DEBUG] prefix to log messages --- common/download.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/common/download.go b/common/download.go index 5f213968b..e4b4dc2e0 100644 --- a/common/download.go +++ b/common/download.go @@ -101,7 +101,7 @@ func (d *DownloadClient) Cancel() { func (d *DownloadClient) Get() (string, error) { // If we already have the file and it matches, then just return the target path. 
if verify, _ := d.VerifyChecksum(d.config.TargetPath); verify { - log.Println("Initial checksum matched, no download needed.") + log.Println("[DEBUG] Initial checksum matched, no download needed.") return d.config.TargetPath, nil } @@ -120,7 +120,7 @@ func (d *DownloadClient) Get() (string, error) { // This is a special case where we use a source file that already exists // locally and we don't make a copy. Normally we would copy or download. finalPath = url.Path - log.Printf("Using local file: %s", finalPath) + log.Printf("[DEBUG] Using local file: %s", finalPath) // Remove forward slash on absolute Windows file URLs before processing if runtime.GOOS == "windows" && len(finalPath) > 0 && finalPath[0] == '/' { @@ -143,7 +143,7 @@ func (d *DownloadClient) Get() (string, error) { return "", err } - log.Printf("Downloading: %s", url.String()) + log.Printf("[DEBUG] Downloading: %s", url.String()) err = d.downloader.Download(f, url) f.Close() if err != nil { From f301a6454dc9a6346c645d4d4da3b30b2fbcfe81 Mon Sep 17 00:00:00 2001 From: Olivier Tremblay Date: Thu, 20 Aug 2015 07:37:24 -0400 Subject: [PATCH 774/956] Added documentation, renamed config param to iso_target_path --- builder/parallels/iso/builder.go | 2 +- builder/qemu/builder.go | 2 +- builder/virtualbox/iso/builder.go | 2 +- builder/vmware/iso/builder.go | 2 +- website/source/docs/builders/parallels-iso.html.markdown | 4 ++++ website/source/docs/builders/qemu.html.markdown | 4 ++++ website/source/docs/builders/virtualbox-iso.html.markdown | 4 ++++ website/source/docs/builders/vmware-iso.html.markdown | 4 ++++ 8 files changed, 20 insertions(+), 4 deletions(-) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index e6e72d7fb..72706748a 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -46,7 +46,7 @@ type Config struct { ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOUrls []string `mapstructure:"iso_urls"` VMName string `mapstructure:"vm_name"` - TargetPath string `mapstructure:"target_path"` + TargetPath string `mapstructure:"iso_target_path"` RawSingleISOUrl string `mapstructure:"iso_url"` diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 8369495d8..76b90421a 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -105,7 +105,7 @@ type Config struct { ShutdownCommand string `mapstructure:"shutdown_command"` SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` - TargetPath string `mapstructure:"target_path"` + TargetPath string `mapstructure:"iso_target_path"` VNCPortMin uint `mapstructure:"vnc_port_min"` VNCPortMax uint `mapstructure:"vnc_port_max"` VMName string `mapstructure:"vm_name"` diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 8a19678c0..fcc062ecb 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -49,7 +49,7 @@ type Config struct { ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOInterface string `mapstructure:"iso_interface"` ISOUrls []string `mapstructure:"iso_urls"` - TargetPath string `mapstructure:"target_path"` + TargetPath string `mapstructure:"iso_target_path"` VMName string `mapstructure:"vm_name"` RawSingleISOUrl string `mapstructure:"iso_url"` diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index c35719dc9..9f7d4cb1f 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -49,7 +49,7 @@ type Config 
struct { VMName string `mapstructure:"vm_name"` BootCommand []string `mapstructure:"boot_command"` SkipCompaction bool `mapstructure:"skip_compaction"` - TargetPath string `mapstructure:"target_path"` + TargetPath string `mapstructure:"iso_target_path"` VMXTemplatePath string `mapstructure:"vmx_template_path"` VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index 76278ec2b..766404ade 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -138,6 +138,10 @@ builder. to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively. +- `iso_target_path` (string) - The path where the iso should be saved after + download. By default will go in the packer cache, with a hash of the + original filename as its name. + - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 651c69122..7f2226aa5 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -168,6 +168,10 @@ builder. to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively. +- `iso_target_path` (string) - The path where the iso should be saved after + download. By default will go in the packer cache, with a hash of the + original filename as its name. + - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All diff --git a/website/source/docs/builders/virtualbox-iso.html.markdown b/website/source/docs/builders/virtualbox-iso.html.markdown index 61e5d3e16..a3421b630 100644 --- a/website/source/docs/builders/virtualbox-iso.html.markdown +++ b/website/source/docs/builders/virtualbox-iso.html.markdown @@ -170,6 +170,10 @@ builder. to, defaults to "ide". When set to "sata", the drive is attached to an AHCI SATA controller. +- `iso_target_path` (string) - The path where the iso should be saved after + download. By default will go in the packer cache, with a hash of the + original filename as its name. + - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 594d77cee..e2140b5d7 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -148,6 +148,10 @@ builder. to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively. +- `iso_target_path` (string) - The path where the iso should be saved after + download. By default will go in the packer cache, with a hash of the + original filename as its name. 
+ - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All From 839f051d936a632fa5c74a3ae8a871757be1c7e0 Mon Sep 17 00:00:00 2001 From: Olivier Tremblay Date: Thu, 20 Aug 2015 07:41:02 -0400 Subject: [PATCH 775/956] undo portions of general stupidity I have done. --- common/packer_config.go | 11 +++++------ common/step_download.go | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/common/packer_config.go b/common/packer_config.go index 67a8a8efb..2ef86e582 100644 --- a/common/packer_config.go +++ b/common/packer_config.go @@ -4,10 +4,9 @@ package common // are sent by packer, properly tagged already so mapstructure can load // them. Embed this structure into your configuration class to get it. type PackerConfig struct { - PackerBuildName string `mapstructure:"packer_build_name"` - PackerBuilderType string `mapstructure:"packer_builder_type"` - PackerDebug bool `mapstructure:"packer_debug"` - PackerForce bool `mapstructure:"packer_force"` - PackerUserVars map[string]string `mapstructure:"packer_user_variables"` - PackerIsoTargetPath string `mapstructure:"packer_iso_target_path"` + PackerBuildName string `mapstructure:"packer_build_name"` + PackerBuilderType string `mapstructure:"packer_builder_type"` + PackerDebug bool `mapstructure:"packer_debug"` + PackerForce bool `mapstructure:"packer_force"` + PackerUserVars map[string]string `mapstructure:"packer_user_variables"` } diff --git a/common/step_download.go b/common/step_download.go index 458117bac..b8bd60b5e 100644 --- a/common/step_download.go +++ b/common/step_download.go @@ -71,7 +71,7 @@ func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction { // if we force a certain extension we hash the URL and add // the extension to force it. cacheKey := url - if s.Extension != "" { //HERE. + if s.Extension != "" { hash := sha1.Sum([]byte(url)) cacheKey = fmt.Sprintf( "%s.%s", hex.EncodeToString(hash[:]), s.Extension) From d13647ee0f9ed8726781a01b7772f3e58c2d149a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 20 Aug 2015 14:03:16 -0700 Subject: [PATCH 776/956] Updated test for qemu VMName. Previously (0.7.5) a file extension was automatically added to the VMname but this logic is not exposed for testing. 
--- builder/qemu/builder_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go index 84d1d40c3..5e96d2fa7 100644 --- a/builder/qemu/builder_test.go +++ b/builder/qemu/builder_test.go @@ -1,11 +1,12 @@ package qemu import ( - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "reflect" "testing" + + "github.com/mitchellh/packer/packer" ) var testPem = ` @@ -83,7 +84,7 @@ func TestBuilderPrepare_Defaults(t *testing.T) { t.Errorf("bad ssh port: %d", b.config.Comm.SSHPort) } - if b.config.VMName != "packer-foo" { + if b.config.VMName != "packer-foo.qcow2" { t.Errorf("bad vm name: %s", b.config.VMName) } From 1830c81eab2f3327bd0ab5b43f09abbfc6a4a140 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 20 Aug 2015 14:15:52 -0700 Subject: [PATCH 777/956] Correct and expand docker config tests for commit, discard, export_path --- builder/docker/config_test.go | 38 +++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/builder/docker/config_test.go b/builder/docker/config_test.go index 907222b4f..442c6ffe2 100644 --- a/builder/docker/config_test.go +++ b/builder/docker/config_test.go @@ -52,10 +52,11 @@ func TestConfigPrepare_exportPath(t *testing.T) { raw := testConfig() - // No export path + // No export path. This is invalid. Previously this would not error during + // validation and as a result the failure would happen at build time. delete(raw, "export_path") _, warns, errs := NewConfig(raw) - testConfigOk(t, warns, errs) + testConfigErr(t, warns, errs) // Good export path raw["export_path"] = "good" @@ -70,14 +71,39 @@ func TestConfigPrepare_exportPath(t *testing.T) { func TestConfigPrepare_exportPathAndCommit(t *testing.T) { raw := testConfig() - raw["commit"] = true - // No export path + // Export but no commit (explicit default) + raw["commit"] = false _, warns, errs := NewConfig(raw) + testConfigOk(t, warns, errs) + + // Commit AND export specified (invalid) + raw["commit"] = true + _, warns, errs = NewConfig(raw) testConfigErr(t, warns, errs) - // No commit - raw["commit"] = false + // Commit but no export + delete(raw, "export_path") + _, warns, errs = NewConfig(raw) + testConfigOk(t, warns, errs) +} + +func TestConfigPrepare_exportDiscard(t *testing.T) { + raw := testConfig() + + // Export but no discard (explicit default) + raw["discard"] = false + _, warns, errs := NewConfig(raw) + testConfigOk(t, warns, errs) + + // Discard AND export (invalid) + raw["discard"] = true + _, warns, errs = NewConfig(raw) + testConfigErr(t, warns, errs) + + // Discard but no export + raw["discard"] = true + delete(raw, "export_path") _, warns, errs = NewConfig(raw) testConfigOk(t, warns, errs) } From 14b39c5b513a3d32ba8a82d23cf948cb3385d755 Mon Sep 17 00:00:00 2001 From: Tim Smith Date: Thu, 20 Aug 2015 16:26:18 -0700 Subject: [PATCH 778/956] Opscode -> Chef Basic company name update --- .../source/docs/provisioners/chef-client.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index aca1a2717..e655a4622 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -12,12 +12,12 @@ page_title: 'Chef-Client Provisioner' Type: `chef-client` The Chef Client Packer provisioner installs and configures software on machines -built by Packer 
using [chef-client](http://docs.opscode.com/chef_client.html).
+built by Packer using [chef-client](https://docs.chef.io/chef_client.html).
 Packer configures a Chef client to talk to a remote Chef Server to provision
 the machine.
 
 The provisioner will even install Chef onto your machine if it isn't already
-installed, using the official Chef installers provided by Opscode.
+installed, using the official Chef installers provided by Chef.
 
 ## Basic Example
 
@@ -71,7 +71,7 @@ configuration is actually required.
     then the sudo will be omitted.
 
 -   `run_list` (array of strings) - The [run
-    list](http://docs.opscode.com/essentials_node_object_run_lists.html)
+    list](http://docs.chef.io/essentials_node_object_run_lists.html)
     for Chef. By default this is empty, and will use the run list sent down by
     the Chef Server.
 
@@ -84,7 +84,7 @@ configuration is actually required.
     Chef server after it is done running. By default, this is false.
 
 -   `skip_install` (boolean) - If true, Chef will not automatically be installed
-    on the machine using the Opscode omnibus installers.
+    on the machine using the Chef omnibus installers.
 
 -   `staging_directory` (string) - This is the directory where all the
     configuration of Chef by Packer will be placed. By default this
@@ -168,7 +168,7 @@ readability) to install Chef. This command can be customized if you want to
 install Chef in another way.
 
 ``` {.text}
-curl -L https://www.opscode.com/chef/install.sh | \
+curl -L https://www.chef.io/chef/install.sh | \
     {{if .Sudo}}sudo{{end}} bash
 ```

From 0006cce41b75a7b0ea6a0f3bbc5e20e1ab33c6c8 Mon Sep 17 00:00:00 2001
From: Gonzalo Peci
Date: Mon, 10 Aug 2015 16:55:59 +1200
Subject: [PATCH 779/956] When using MinGW, GOPATH uses ; as its separator; if
 we use :, everything after the drive letter is stripped. E.g. C:\GO_Working\
 gets stripped to C, which makes ```make dev``` and ```make``` fail

---
 scripts/build.sh | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/scripts/build.sh b/scripts/build.sh
index c265b88c2..364e221dd 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -62,7 +62,16 @@ case $(uname) in
 		;;
 esac
 OLDIFS=$IFS
-IFS=: MAIN_GOPATH=($GOPATH)
+IFS=:
+case $(uname) in
+	MINGW*)
+		IFS=";"
+		;;
+	MSYS*)
+		IFS=";"
+		;;
+esac
+MAIN_GOPATH=($GOPATH)
 IFS=$OLDIFS
 
 # Copy our OS/Arch to the bin/ directory

From c8d1c650f0d5563ed4b5a24a50777d1d0755c0ac Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 21 Aug 2015 16:45:47 -0700
Subject: [PATCH 780/956] Revert "Updated test for qemu VMName. Previously
 (0.7.5) a file extension was automatically added to the VMname but this
 logic is not exposed for testing."

This reverts commit d13647ee0f9ed8726781a01b7772f3e58c2d149a.
--- builder/qemu/builder_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go index 5e96d2fa7..84d1d40c3 100644 --- a/builder/qemu/builder_test.go +++ b/builder/qemu/builder_test.go @@ -1,12 +1,11 @@ package qemu import ( + "github.com/mitchellh/packer/packer" "io/ioutil" "os" "reflect" "testing" - - "github.com/mitchellh/packer/packer" ) var testPem = ` @@ -84,7 +83,7 @@ func TestBuilderPrepare_Defaults(t *testing.T) { t.Errorf("bad ssh port: %d", b.config.Comm.SSHPort) } - if b.config.VMName != "packer-foo.qcow2" { + if b.config.VMName != "packer-foo" { t.Errorf("bad vm name: %s", b.config.VMName) } From bf733b75d5554cfc4ed2236ccd0e034af64f73b7 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 21 Aug 2015 16:46:29 -0700 Subject: [PATCH 781/956] Revert "By default, Qemu will now have an extension added based on the file format. This is a change to more closely match behavior from 0.7.5." This reverts commit 55fadd96bccc3d762ca93cbc4d12b27f6117e196. --- builder/qemu/builder.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 39984d68b..4ac22b59b 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -201,13 +201,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.VNCPortMax = 6000 } - if b.config.Format == "" { - b.config.Format = "qcow2" + if b.config.VMName == "" { + b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } - if b.config.VMName == "" { - b.config.VMName = fmt.Sprintf("packer-%s.%s", - b.config.PackerBuildName, b.config.Format) + if b.config.Format == "" { + b.config.Format = "qcow2" } if b.config.FloppyFiles == nil { From 50d44ad7407dee712d2d482ae5b1508a7693fe1a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 21 Aug 2015 17:08:14 -0700 Subject: [PATCH 782/956] Updated changelog --- CHANGELOG.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90529ef1c..733247805 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,21 @@ -## (Unreleased) +## 0.8.6 (Unreleased) IMPROVEMENTS: * builder/docker: Now supports Download so it can be used with the file provisioner to download a file from a container. [GH-2585] + * builder/docker: Now verifies that the artifact will be used before the build + starts, unless the `discard` option is specified. This prevent failures + after the build completes. [GH-2626] + * post-processor/artifice: Now supports glob-like syntax for filenames [GH-2619] * post-processor/vagrant: Like the compress post-processor, vagrant now uses a parallel gzip algorithm to compress vagrant boxes. [GH-2590] BUG FIXES: - * builded/parallels: Fix interpolation in parallels_tools_guest_path [GH-2543] + * core: When `iso_url` is a local file and the checksum is invalid, the local + file will no longer be deleted. 
[GH-2603] + * builder/parallels: Fix interpolation in `parallels_tools_guest_path` [GH-2543] ## 0.8.5 (Aug 10, 2015) From f725cf1a4b86b2df891efdb1baa974f6200dc8d8 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 21 Aug 2015 17:10:56 -0700 Subject: [PATCH 783/956] Correct docs for qemu vm_name --- website/source/docs/builders/qemu.html.markdown | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index ab3935329..9288bbcab 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -247,10 +247,9 @@ builder and not otherwise conflicting with the qemuargs): host port. - `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for - the new virtual machine. By default this is "packer-BUILDNAME.FORMAT", where - `BUILDNAME` is the name of the build and `FORMAT` matches the `format` - option, above. If you customize this you must specify the file extension or - none will be used. + the new virtual machine. By default this is "packer-BUILDNAME", where + `BUILDNAME` is the name of the build. Currently, no file extension will be + used unless it is specified in this option. - `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to use for the VNC port on the host machine which is forwarded to the VNC From 0a79546cf92234017f2e5a1adbab0ba2db6f569f Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 21 Aug 2015 17:17:56 -0700 Subject: [PATCH 784/956] Update dependency to an SSL-enabled path; this was migrated from launchpad to github --- builder/parallels/common/driver_9.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index c577151dc..f742ad635 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/going/toolkit/xmlpath" + "gopkg.in/xmlpath.v2" ) type Parallels9Driver struct { From 938f2178d765002728c2165854c11ae5e8093366 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 6 Aug 2015 23:45:39 -0700 Subject: [PATCH 785/956] Overhaul the Makefile - Fix updatedeps reverting to master, which causes Travis CI to produce invalid results for pull-request builds. The makefile attempts to detect this change and checkout the correct branch if it happens. - Clean up the code style and failure messaging. - Add / update proxy targets for common workflows: default, deps, ci, release --- .travis.yml | 13 ++------- Makefile | 76 +++++++++++++++++++++++++++++++----------------- scripts/build.sh | 4 +-- 3 files changed, 54 insertions(+), 39 deletions(-) diff --git a/.travis.yml b/.travis.yml index 00f3361b9..5880a73a4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,25 +4,16 @@ language: go go: - 1.4 + - 1.5 - tip -install: make updatedeps - script: - - GOMAXPROCS=2 make test - #- go test -race ./... + - GOMAXPROCS=2 make ci branches: only: - master -notifications: - irc: - channels: - - "irc.freenode.org#packer-tool" - skip_join: true - use_notice: true - matrix: fast_finish: true allow_failures: diff --git a/Makefile b/Makefile index 0ed426520..7900a2830 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,31 @@ TEST?=./... 
+# Get the current full sha from git +GITSHA:=$(shell git rev-parse HEAD) +# Get the current local branch name from git (if we can, this may be blank) +GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null) -default: test vet dev +default: test dev -bin: +ci: deps test testrace + +release: updatedeps test bin + +bin: deps + @grep 'const VersionPrerelease = ""' version.go > /dev/null ; if [ $$? -ne 0 ]; then \ + echo "ERROR: You must remove prerelease tags from version.go prior to release."; \ + exit 1; \ + fi @sh -c "$(CURDIR)/scripts/build.sh" -dev: - @TF_DEV=1 sh -c "$(CURDIR)/scripts/build.sh" +deps: + go get -v -d ./... + +dev: deps + @grep 'const VersionPrerelease = ""' version.go > /dev/null ; if [ $$? -eq 0 ]; then \ + echo "ERROR: You must add prerelease tags to version.go prior to making a dev build."; \ + exit 1; \ + fi + @PACKER_DEV=1 sh -c "$(CURDIR)/scripts/build.sh" # generate runs `go generate` to build the dynamically generated # source files. @@ -14,23 +33,33 @@ generate: go generate ./... test: - @echo "Running tests on:"; git symbolic-ref HEAD; git rev-parse HEAD - go test $(TEST) $(TESTARGS) -timeout=10s - @$(MAKE) vet + go test $(TEST) $(TESTARGS) -timeout=15s + @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ + go get golang.org/x/tools/cmd/vet; \ + fi + @go vet $(TEST) ; if [ $$? -eq 1 ]; then \ + echo "ERROR: Vet found problems in the code."; \ + exit 1; \ + fi # testacc runs acceptance tests testacc: generate - @if [ "$(TEST)" = "./..." ]; then \ - echo "ERROR: Set TEST to a specific package"; \ - exit 1; \ - fi - PACKER_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 45m + @echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel." + PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m testrace: - go test -race $(TEST) $(TESTARGS) + go test -race $(TEST) $(TESTARGS) -timeout=15s +# `go get -u` causes git to revert packer to the master branch. This causes all +# kinds of headaches. We record the git sha when make starts try to correct it +# if we detect dift. DO NOT use `git checkout -f` for this because it will wipe +# out your changes without asking. updatedeps: - @echo "Updating deps on:"; git symbolic-ref HEAD; git rev-parse HEAD + @echo "INFO: Currently on $(GITBRANCH) ($(GITSHA))" + @git diff-index --quiet HEAD ; if [ $$? -ne 0 ]; then \ + echo "ERROR: Your git working tree has uncommitted changes. updatedeps will fail. Please stash or commit your changes first."; \ + exit 1; \ + fi go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go list ./... \ @@ -38,19 +67,14 @@ updatedeps: | grep -v github.com/mitchellh/packer \ | grep -v '/internal/' \ | sort -u \ - | xargs go get -f -u -v - @echo "Finished updating deps, now on:"; git symbolic-ref HEAD; git rev-parse HEAD - -vet: - @echo "Running go vet on:"; git symbolic-ref HEAD; git rev-parse HEAD - @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ - go get golang.org/x/tools/cmd/vet; \ + | xargs go get -f -u -v -d ; if [ $$? -eq 0 ]; then \ + echo "ERROR: go get failed. Your git branch may have changed; you were on $(GITBRANCH) ($(GITSHA))."; \ fi - @go vet ./... ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. 
Please check the reported constructs"; \
-		echo "and fix them if necessary before submitting the code for reviewal."; \
+	@if [ "$(GITBRANCH)" != "" ]; then git checkout -q $(GITBRANCH); else git checkout -q $(GITSHA); fi
+	@if [ `git rev-parse HEAD` != "$(GITSHA)" ]; then \
+		echo "ERROR: git checkout has drifted and we weren't able to correct it. Was $(GITBRANCH) ($(GITSHA))"; \
 		exit 1; \
 	fi
+	@echo "INFO: Currently on $(GITBRANCH) ($(GITSHA))"
 
-.PHONY: bin default generate test testacc updatedeps vet
+.PHONY: bin checkversion ci default deps generate test testacc testrace updatedeps

diff --git a/scripts/build.sh b/scripts/build.sh
index 364e221dd..b2e3248e4 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -16,7 +16,7 @@ GIT_COMMIT=$(git rev-parse HEAD)
 GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
 
 # If its dev mode, only build for ourself
-if [ "${TF_DEV}x" != "x" ]; then
+if [ "${PACKER_DEV}x" != "x" ]; then
 	XC_OS=${XC_OS:-$(go env GOOS)}
 	XC_ARCH=${XC_ARCH:-$(go env GOARCH)}
 fi
@@ -27,7 +27,7 @@ XC_OS=${XC_OS:-linux darwin windows freebsd openbsd}
 
 # Install dependencies
 echo "==> Getting dependencies..."
-go get ./...
+go get -d ./...
 
 # Delete the old dir
 echo "==> Removing old directory..."

From 547e9dd3403c519879c97339f621f2fd82d6ed3a Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 21 Aug 2015 18:37:51 -0700
Subject: [PATCH 786/956] Add deps pre-target and remove testrace sadness from
 ci

---
 Makefile | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Makefile b/Makefile
index 7900a2830..1bd54cac0 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null)
 
 default: test dev
 
-ci: deps test testrace
+ci: deps test
 
 release: updatedeps test bin
 
@@ -29,10 +29,10 @@ dev: deps
 
 # generate runs `go generate` to build the dynamically generated
 # source files.
-generate:
+generate: deps
 	go generate ./...
 
-test:
+test: deps
 	go test $(TEST) $(TESTARGS) -timeout=15s
 	@go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
 		go get golang.org/x/tools/cmd/vet; \
@@ -43,11 +43,11 @@ test:
 	fi
 
 # testacc runs acceptance tests
-testacc: generate
+testacc: deps generate
 	@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
 	PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m
 
-testrace:
+testrace: deps
 	go test -race $(TEST) $(TESTARGS) -timeout=15s

From f8f7b7a34c1be06058f5aca23a51247db12cdbc5 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Fri, 21 Aug 2015 19:54:01 -0700
Subject: [PATCH 787/956] Bump version to 0.8.6

---
 version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/version.go b/version.go
index a32442840..860971edb 100644
--- a/version.go
+++ b/version.go
@@ -9,4 +9,4 @@ const Version = "0.8.6"
 // A pre-release marker for the version. If this is "" (empty string)
 // then it means that it is a final release. Otherwise, this is a pre-release
 // such as "dev" (in development), "beta", "rc1", etc.
-const VersionPrerelease = "dev"
+const VersionPrerelease = ""

From 7d3afc882a19b746ffab025f2176c7fb546824b8 Mon Sep 17 00:00:00 2001
From: Rickard von Essen
Date: Sat, 22 Aug 2015 13:15:59 +0200
Subject: [PATCH 788/956] Add check for Parallels Desktop edition in PD 11.

Starting with Parallels Desktop 11, the command line functionality is
available only in Pro and Business editions.
--- builder/parallels/common/driver.go | 19 +++++- builder/parallels/common/driver_11.go | 61 +++++++++++++++++++ builder/parallels/common/driver_9.go | 4 ++ .../docs/builders/parallels.html.markdown | 3 + 4 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 builder/parallels/common/driver_11.go diff --git a/builder/parallels/common/driver.go b/builder/parallels/common/driver.go index 03a4e0f09..898070ae0 100644 --- a/builder/parallels/common/driver.go +++ b/builder/parallels/common/driver.go @@ -57,6 +57,7 @@ type Driver interface { func NewDriver() (Driver, error) { var drivers map[string]Driver var prlctlPath string + var prlsrvctlPath string var supportedVersions []string dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases" @@ -75,21 +76,34 @@ func NewDriver() (Driver, error) { log.Printf("prlctl path: %s", prlctlPath) + if prlsrvctlPath == "" { + var err error + prlsrvctlPath, err = exec.LookPath("prlsrvctl") + if err != nil { + return nil, err + } + } + + log.Printf("prlsrvctl path: %s", prlsrvctlPath) + drivers = map[string]Driver{ - "11": &Parallels10Driver{ + "11": &Parallels11Driver{ Parallels9Driver: Parallels9Driver{ PrlctlPath: prlctlPath, + PrlsrvctlPath: prlsrvctlPath, dhcp_lease_file: dhcp_lease_file, }, }, "10": &Parallels10Driver{ Parallels9Driver: Parallels9Driver{ PrlctlPath: prlctlPath, + PrlsrvctlPath: prlsrvctlPath, dhcp_lease_file: dhcp_lease_file, }, }, "9": &Parallels9Driver{ PrlctlPath: prlctlPath, + PrlsrvctlPath: prlsrvctlPath, dhcp_lease_file: dhcp_lease_file, }, } @@ -97,6 +111,9 @@ func NewDriver() (Driver, error) { for v, d := range drivers { version, _ := d.Version() if strings.HasPrefix(version, v) { + if err := d.Verify(); err != nil { + return nil, err + } return d, nil } supportedVersions = append(supportedVersions, v) diff --git a/builder/parallels/common/driver_11.go b/builder/parallels/common/driver_11.go new file mode 100644 index 000000000..c59b6c111 --- /dev/null +++ b/builder/parallels/common/driver_11.go @@ -0,0 +1,61 @@ +package common + +import ( + "fmt" + "os/exec" + "regexp" +) + +// Parallels11Driver are inherited from Parallels9Driver. +// Used for Parallels Desktop 11, requires Pro or Business Edition +type Parallels11Driver struct { + Parallels9Driver +} + +func (d *Parallels11Driver) Verify() error { + + stdout, err := exec.Command(d.PrlsrvctlPath, "info", "--license").Output() + if err != nil { + return err + } + + editionRe := regexp.MustCompile(`edition="(\w+)"`) + matches := editionRe.FindStringSubmatch(string(stdout)) + if matches == nil { + return fmt.Errorf( + "Could not determine your Parallels Desktop edition using: %s info --license", d.PrlsrvctlPath) + } else { + switch matches[1] { + case "pro", "business": + break + default: + return fmt.Errorf("Packer can be used only with Parallels Desktop 11 Pro or Business edition. 
You use: %s edition", matches[1]) + } + } + + return nil +} + +func (d *Parallels11Driver) SetDefaultConfiguration(vmName string) error { + commands := make([][]string, 12) + commands[0] = []string{"set", vmName, "--cpus", "1"} + commands[1] = []string{"set", vmName, "--memsize", "512"} + commands[2] = []string{"set", vmName, "--startup-view", "same"} + commands[3] = []string{"set", vmName, "--on-shutdown", "close"} + commands[4] = []string{"set", vmName, "--on-window-close", "keep-running"} + commands[5] = []string{"set", vmName, "--auto-share-camera", "off"} + commands[6] = []string{"set", vmName, "--smart-guard", "off"} + commands[7] = []string{"set", vmName, "--shared-cloud", "off"} + commands[8] = []string{"set", vmName, "--shared-profile", "off"} + commands[9] = []string{"set", vmName, "--smart-mount", "off"} + commands[10] = []string{"set", vmName, "--sh-app-guest-to-host", "off"} + commands[11] = []string{"set", vmName, "--sh-app-host-to-guest", "off"} + + for _, command := range commands { + err := d.Prlctl(command...) + if err != nil { + return err + } + } + return nil +} diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index c577151dc..5e0c41aa2 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -19,6 +19,10 @@ import ( type Parallels9Driver struct { // This is the path to the "prlctl" application. PrlctlPath string + + // This is the path to the "prlsrvctl" application. + PrlsrvctlPath string + // The path to the parallels_dhcp_leases file dhcp_lease_file string } diff --git a/website/source/docs/builders/parallels.html.markdown b/website/source/docs/builders/parallels.html.markdown index 582f8e0af..f855a2402 100644 --- a/website/source/docs/builders/parallels.html.markdown +++ b/website/source/docs/builders/parallels.html.markdown @@ -35,3 +35,6 @@ Virtualization SDK](http://www.parallels.com/downloads/desktop/). The SDK can be installed by downloading and following the instructions in the dmg. + +Parallels Desktop for Mac 9 and later is supported, from PD 11 Pro or Business +edition is required. 
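The edition check added in `driver_11.go` above hinges on a single regexp submatch. A self-contained sketch of the same parsing, run against a fabricated license string (the exact `prlsrvctl info --license` output format is an assumption here; only the `edition="..."` field matters):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Fabricated sample output; the real format may differ.
	sample := `License info: state="valid" edition="pro" expiration="never"`

	// Same pattern as the one added in driver_11.go above.
	editionRe := regexp.MustCompile(`edition="(\w+)"`)
	matches := editionRe.FindStringSubmatch(sample)
	if matches == nil {
		fmt.Println("could not determine the Parallels Desktop edition")
		return
	}

	switch matches[1] {
	case "pro", "business":
		fmt.Printf("edition %q: command line tools are available\n", matches[1])
	default:
		fmt.Printf("edition %q: not usable with Packer\n", matches[1])
	}
}
```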
From 599ef8e936e39467a4b88c0a3158ee779bfc7356 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 22 Aug 2015 13:43:33 -0700 Subject: [PATCH 789/956] Add 0.8.6 release date to the changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 733247805..4bc93c7f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.8.6 (Unreleased) +## 0.8.6 (Aug 22, 2015) IMPROVEMENTS: From a55d2f1243338397c56b85b4ae89b16370072b55 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Sat, 22 Aug 2015 14:14:58 -0700 Subject: [PATCH 790/956] Allow failures in 1.5 since the build scripts don't support switching for --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 5880a73a4..497bbe1ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,3 +18,4 @@ matrix: fast_finish: true allow_failures: - go: tip + - go: 1.5 From 1fe44b6624c2d1fb01553a258e7241d967614069 Mon Sep 17 00:00:00 2001 From: Lantrix Date: Mon, 24 Aug 2015 17:32:01 +1000 Subject: [PATCH 791/956] Powershell debugging options as per Issue #2550 --- website/source/docs/other/debugging.html.markdown | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index efe01a0cf..2ca5ab7d1 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -55,5 +55,13 @@ to force the log to always go to a specific file when logging is enabled. Note that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in order for any logging to be enabled. +### Debugging Packer in Powershell/Windows + +In Windows you can set the detailed logs environmental variable `PACKER_LOG` or +the log variable `PACKER_LOG_PATH` using powershell environment variables. For example: + + $env:PACKER_LOG=1 + $env:PACKER_LOG_PATH="packerlog.txt" + If you find a bug with Packer, please include the detailed log by using a service such as [gist](http://gist.github.com). 
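A hedged sketch of how the two variables documented above can cooperate — assumed behavior for illustration, not the actual Packer source: `PACKER_LOG` acts as the on/off switch and `PACKER_LOG_PATH` optionally redirects output to a file:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"
)

// setupLogging is a guess at the general shape: logging is discarded
// unless PACKER_LOG is set, and PACKER_LOG_PATH picks the destination.
func setupLogging() {
	if os.Getenv("PACKER_LOG") == "" {
		log.SetOutput(ioutil.Discard)
		return
	}
	if path := os.Getenv("PACKER_LOG_PATH"); path != "" {
		f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			log.Printf("[WARN] could not open %s, logging to stderr: %s", path, err)
			return
		}
		log.SetOutput(f)
	}
}

func main() {
	setupLogging()
	log.Println("[DEBUG] logging configured")
}
```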
From 83980d2326ee7fdc08209bf677878d53b7317c77 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Mon, 24 Aug 2015 15:09:29 +0200 Subject: [PATCH 792/956] Enable headless mode by default on Parallels Desktop 11 --- builder/parallels/common/driver_11.go | 2 +- builder/parallels/common/run_config.go | 1 - builder/parallels/common/step_run.go | 8 ---- builder/parallels/iso/builder.go | 1 - builder/parallels/pvm/builder.go | 1 - fix/fixer.go | 2 + fix/fixer_parallels_headless.go | 51 +++++++++++++++++++++ fix/fixer_parallels_headless_test.go | 61 ++++++++++++++++++++++++++ 8 files changed, 115 insertions(+), 12 deletions(-) create mode 100644 fix/fixer_parallels_headless.go create mode 100644 fix/fixer_parallels_headless_test.go diff --git a/builder/parallels/common/driver_11.go b/builder/parallels/common/driver_11.go index c59b6c111..32a1fc92a 100644 --- a/builder/parallels/common/driver_11.go +++ b/builder/parallels/common/driver_11.go @@ -40,7 +40,7 @@ func (d *Parallels11Driver) SetDefaultConfiguration(vmName string) error { commands := make([][]string, 12) commands[0] = []string{"set", vmName, "--cpus", "1"} commands[1] = []string{"set", vmName, "--memsize", "512"} - commands[2] = []string{"set", vmName, "--startup-view", "same"} + commands[2] = []string{"set", vmName, "--startup-view", "headless"} commands[3] = []string{"set", vmName, "--on-shutdown", "close"} commands[4] = []string{"set", vmName, "--on-window-close", "keep-running"} commands[5] = []string{"set", vmName, "--auto-share-camera", "off"} diff --git a/builder/parallels/common/run_config.go b/builder/parallels/common/run_config.go index c755cdafb..072895f41 100644 --- a/builder/parallels/common/run_config.go +++ b/builder/parallels/common/run_config.go @@ -8,7 +8,6 @@ import ( ) type RunConfig struct { - Headless bool `mapstructure:"headless"` RawBootWait string `mapstructure:"boot_wait"` BootWait time.Duration `` diff --git a/builder/parallels/common/step_run.go b/builder/parallels/common/step_run.go index e9c3ab27d..43f0e92eb 100644 --- a/builder/parallels/common/step_run.go +++ b/builder/parallels/common/step_run.go @@ -17,7 +17,6 @@ import ( // Produces: type StepRun struct { BootWait time.Duration - Headless bool vmName string } @@ -28,13 +27,6 @@ func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction { vmName := state.Get("vmName").(string) ui.Say("Starting the virtual machine...") - //guiArgument := "gui" - if s.Headless == true { - ui.Message("WARNING: The VM will be started in headless mode, as configured.\n" + - "In headless mode, errors during the boot sequence or OS setup\n" + - "won't be easily visible. Use at your own discretion.") - //guiArgument = "headless" - } command := []string{"start", vmName} if err := driver.Prlctl(command...); err != nil { err := fmt.Errorf("Error starting VM: %s", err) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 6b731544d..cba02dd19 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -241,7 +241,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, ¶llelscommon.StepRun{ BootWait: b.config.BootWait, - Headless: b.config.Headless, // TODO: migth work on Enterprise Ed. 
}, ¶llelscommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, diff --git a/builder/parallels/pvm/builder.go b/builder/parallels/pvm/builder.go index 471b59bef..b0b675fb6 100644 --- a/builder/parallels/pvm/builder.go +++ b/builder/parallels/pvm/builder.go @@ -74,7 +74,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, ¶llelscommon.StepRun{ BootWait: b.config.BootWait, - Headless: b.config.Headless, }, ¶llelscommon.StepTypeBootCommand{ BootCommand: b.config.BootCommand, diff --git a/fix/fixer.go b/fix/fixer.go index 8da82f48f..5b5006215 100644 --- a/fix/fixer.go +++ b/fix/fixer.go @@ -26,6 +26,7 @@ func init() { "virtualbox-gaattach": new(FixerVirtualBoxGAAttach), "virtualbox-rename": new(FixerVirtualBoxRename), "vmware-rename": new(FixerVMwareRename), + "parallels-headless": new(FixerParallelsHeadless), } FixerOrder = []string{ @@ -35,5 +36,6 @@ func init() { "pp-vagrant-override", "virtualbox-rename", "vmware-rename", + "parallels-headless", } } diff --git a/fix/fixer_parallels_headless.go b/fix/fixer_parallels_headless.go new file mode 100644 index 000000000..f7bc8874a --- /dev/null +++ b/fix/fixer_parallels_headless.go @@ -0,0 +1,51 @@ +package fix + +import ( + "github.com/mitchellh/mapstructure" +) + +// FixerParallelsHeadless removes "headless" from a template in a Parallels builder +type FixerParallelsHeadless struct{} + +func (FixerParallelsHeadless) Fix(input map[string]interface{}) (map[string]interface{}, error) { + // The type we'll decode into; we only care about builders + type template struct { + Builders []map[string]interface{} + } + + // Decode the input into our structure, if we can + var tpl template + if err := mapstructure.Decode(input, &tpl); err != nil { + return nil, err + } + + for _, builder := range tpl.Builders { + builderTypeRaw, ok := builder["type"] + if !ok { + continue + } + + builderType, ok := builderTypeRaw.(string) + if !ok { + continue + } + + if builderType != "parallels-iso" && builderType != "parallels-pvm" { + continue + } + + _, ok = builder["headless"] + if !ok { + continue + } + + delete(builder, "headless") + } + + input["builders"] = tpl.Builders + return input, nil +} + +func (FixerParallelsHeadless) Synopsis() string { + return `Removes unused "headless" from Parallels builders` +} diff --git a/fix/fixer_parallels_headless_test.go b/fix/fixer_parallels_headless_test.go new file mode 100644 index 000000000..c6c92e981 --- /dev/null +++ b/fix/fixer_parallels_headless_test.go @@ -0,0 +1,61 @@ +package fix + +import ( + "reflect" + "testing" +) + +func TestFixerParallelsHeadless_Impl(t *testing.T) { + var _ Fixer = new(FixerParallelsHeadless) +} + +func TestFixerParallelsHeadless_Fix(t *testing.T) { + cases := []struct { + Input map[string]interface{} + Expected map[string]interface{} + }{ + // No headless field + { + Input: map[string]interface{}{ + "type": "parallels-iso", + }, + + Expected: map[string]interface{}{ + "type": "parallels-iso", + }, + }, + + // Headless field + { + Input: map[string]interface{}{ + "type": "parallels-iso", + "headless": false, + }, + + Expected: map[string]interface{}{ + "type": "parallels-iso", + }, + }, + } + + for _, tc := range cases { + var f FixerParallelsHeadless + + input := map[string]interface{}{ + "builders": []map[string]interface{}{tc.Input}, + } + + expected := map[string]interface{}{ + "builders": []map[string]interface{}{tc.Expected}, + } + + output, err := f.Fix(input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(output, 
expected) { + t.Fatalf("unexpected: %#v\nexpected: %#v\n", output, expected) + } + } +} From 4e20522afdf899f0916ffb140030a42bd69c7c23 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Mon, 24 Aug 2015 20:19:49 +0200 Subject: [PATCH 793/956] Fixes #2667: Added vmware-rename to fix command help text --- command/fix.go | 1 + 1 file changed, 1 insertion(+) diff --git a/command/fix.go b/command/fix.go index 2d9bcce37..451742ebb 100644 --- a/command/fix.go +++ b/command/fix.go @@ -127,6 +127,7 @@ Fixes that are run: pp-vagrant-override Replaces old-style provider overrides for the Vagrant post-processor to new-style as of Packer 0.5.0. virtualbox-rename Updates "virtualbox" builders to "virtualbox-iso" + vmware-rename Updates "vmware" builders to "vmware-iso" Options: From 2a94b596ce93237517026ddd220da8b12f0132c7 Mon Sep 17 00:00:00 2001 From: Or Cohen Date: Tue, 25 Aug 2015 00:19:11 +0300 Subject: [PATCH 794/956] Fix and refactor block device mapping builder Fix NoDevice not properly configured #2398. Refactor the mapping builder to match BlockDeviceMapping from AWS SDK: * If NoDevice is specified, include NoDevice only. * If VirtualName starts with ephemeral, don't create Ebs (they are mutually exclusive anyway) * Otherwise, assume Ebs and create it with the exact specified attributes by the user. Change/add tests to reflect these changes. --- builder/amazon/common/block_device.go | 55 ++++++++++++---------- builder/amazon/common/block_device_test.go | 34 +++++++++++-- 2 files changed, 60 insertions(+), 29 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 094738869..73d17c45f 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -30,35 +30,40 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { var blockDevices []*ec2.BlockDeviceMapping for _, blockDevice := range b { - ebsBlockDevice := &ec2.EbsBlockDevice{ - VolumeType: aws.String(blockDevice.VolumeType), - VolumeSize: aws.Int64(blockDevice.VolumeSize), - DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), - } - - // IOPS is only valid for SSD Volumes - if blockDevice.VolumeType != "" && blockDevice.VolumeType != "standard" && blockDevice.VolumeType != "gp2" { - ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS) - } - - // You cannot specify Encrypted if you specify a Snapshot ID - if blockDevice.SnapshotId != "" { - ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId) - } else if blockDevice.Encrypted { - ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted) - } - - mapping := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(blockDevice.DeviceName), - VirtualName: aws.String(blockDevice.VirtualName), - } - - if !strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { - mapping.Ebs = ebsBlockDevice + mapping := &ec2.BlockDeviceMapping { + DeviceName: aws.String(blockDevice.DeviceName), } if blockDevice.NoDevice { mapping.NoDevice = aws.String("") + } else if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { + mapping.VirtualName = aws.String(blockDevice.VirtualName) + } else { + ebsBlockDevice := &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), + } + + if blockDevice.VolumeType != "" { + ebsBlockDevice.VolumeType = aws.String(blockDevice.VolumeType) + } + + if blockDevice.VolumeSize > 0 { + ebsBlockDevice.VolumeSize = aws.Int64(blockDevice.VolumeSize) + } + + // IOPS is only valid for io1 type + if blockDevice.VolumeType == "io1" { + 
ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS) + } + + // You cannot specify Encrypted if you specify a Snapshot ID + if blockDevice.SnapshotId != "" { + ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId) + } else if blockDevice.Encrypted { + ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted) + } + + mapping.Ebs = ebsBlockDevice } blockDevices = append(blockDevices, mapping) diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 99514009b..b73f4da36 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -24,7 +24,6 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), - VirtualName: aws.String(""), Ebs: &ec2.EbsBlockDevice{ SnapshotId: aws.String("snap-1234"), VolumeType: aws.String("standard"), @@ -41,9 +40,7 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), - VirtualName: aws.String(""), Ebs: &ec2.EbsBlockDevice{ - VolumeType: aws.String(""), VolumeSize: aws.Int64(8), DeleteOnTermination: aws.Bool(false), }, @@ -60,7 +57,6 @@ func TestBlockDevice(t *testing.T) { Result: &ec2.BlockDeviceMapping{ DeviceName: aws.String("/dev/sdb"), - VirtualName: aws.String(""), Ebs: &ec2.EbsBlockDevice{ VolumeType: aws.String("io1"), VolumeSize: aws.Int64(8), @@ -69,6 +65,25 @@ func TestBlockDevice(t *testing.T) { }, }, }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + VolumeType: "gp2", + VolumeSize: 8, + DeleteOnTermination: true, + Encrypted: true, + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + Ebs: &ec2.EbsBlockDevice{ + VolumeType: aws.String("gp2"), + VolumeSize: aws.Int64(8), + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + }, + }, + }, { Config: &BlockDevice{ DeviceName: "/dev/sdb", @@ -80,6 +95,17 @@ func TestBlockDevice(t *testing.T) { VirtualName: aws.String("ephemeral0"), }, }, + { + Config: &BlockDevice{ + DeviceName: "/dev/sdb", + NoDevice: true, + }, + + Result: &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/sdb"), + NoDevice: aws.String(""), + }, + }, } for _, tc := range cases { From 72e8119233f433e0ac64a9c78bf77235368cb67e Mon Sep 17 00:00:00 2001 From: Tim Smith Date: Tue, 25 Aug 2015 21:53:50 -0700 Subject: [PATCH 795/956] Download chef from chef.io --- provisioner/chef-client/provisioner.go | 2 +- provisioner/chef-solo/provisioner.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 62b3732de..ddb55714b 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -89,7 +89,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { if p.config.InstallCommand == "" { p.config.InstallCommand = "curl -L " + - "https://www.opscode.com/chef/install.sh | " + + "https://www.chef.io/chef/install.sh | " + "{{if .Sudo}}sudo {{end}}bash" } diff --git a/provisioner/chef-solo/provisioner.go b/provisioner/chef-solo/provisioner.go index 61f734805..04ecadb1c 100644 --- a/provisioner/chef-solo/provisioner.go +++ b/provisioner/chef-solo/provisioner.go @@ -91,7 +91,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.InstallCommand == "" { - p.config.InstallCommand = "curl -L https://www.opscode.com/chef/install.sh | {{if .Sudo}}sudo {{end}}bash" + p.config.InstallCommand = "curl -L https://www.chef.io/chef/install.sh | 
{{if .Sudo}}sudo {{end}}bash" } if p.config.RunList == nil { From 0d973145f5111b9a5e7547c01ced33433be70be4 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 26 Aug 2015 12:00:13 -0700 Subject: [PATCH 796/956] Added a note to the push docs about dot being disallowed in atlas names --- website/source/docs/templates/push.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index b46bef3e8..efc6029cb 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -38,7 +38,8 @@ each category, the available configuration keys are alphabetized. ### Required - `name` (string) - Name of the build configuration in the build service. If - this doesn't exist, it will be created (by default). + this doesn't exist, it will be created (by default). Note that the name can + cannot contain dots. `[a-zA-Z0-9-_]+` are safe. ### Optional From c834a00e22385681b48c3b1126f189c63000bffd Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 26 Aug 2015 12:16:18 -0700 Subject: [PATCH 797/956] Added a name validator to the push command --- command/push.go | 11 +++++++++++ website/source/docs/templates/push.html.markdown | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/command/push.go b/command/push.go index f888186de..dac5aa180 100644 --- a/command/push.go +++ b/command/push.go @@ -6,6 +6,7 @@ import ( "os" "os/signal" "path/filepath" + "regexp" "strings" "github.com/hashicorp/atlas-go/archive" @@ -16,6 +17,11 @@ import ( // archiveTemplateEntry is the name the template always takes within the slug. const archiveTemplateEntry = ".packer-template" +var ( + reName = regexp.MustCompile("^[a-zA-Z0-9-_/]+$") + errInvalidName = fmt.Errorf("Your build name can only contain these characters: [a-zA-Z0-9-_]+") +) + type PushCommand struct { Meta @@ -88,6 +94,11 @@ func (c *PushCommand) Run(args []string) int { return 1 } + if !reName.MatchString(name) { + c.Ui.Error(errInvalidName.Error()) + return 1 + } + // Determine our token if token == "" { token = push.Token diff --git a/website/source/docs/templates/push.html.markdown b/website/source/docs/templates/push.html.markdown index efc6029cb..1e7d06ac5 100644 --- a/website/source/docs/templates/push.html.markdown +++ b/website/source/docs/templates/push.html.markdown @@ -39,7 +39,7 @@ each category, the available configuration keys are alphabetized. - `name` (string) - Name of the build configuration in the build service. If this doesn't exist, it will be created (by default). Note that the name can - cannot contain dots. `[a-zA-Z0-9-_]+` are safe. + cannot contain dots. `[a-zA-Z0-9-_/]+` are safe. ### Optional From 6d1376db948d451521ad40a87884c242ea59bd4b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 26 Aug 2015 21:24:47 -0700 Subject: [PATCH 798/956] up version for dev --- version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version.go b/version.go index 860971edb..8ec15b794 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,9 @@ package main var GitCommit string // The main version number that is being run at the moment. -const Version = "0.8.6" +const Version = "0.8.7" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "" +const VersionPrerelease = "dev" From 5c7481362c8e008cf92b06e9bd21d930699e66bb Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 27 Aug 2015 14:35:54 -0700 Subject: [PATCH 799/956] Changed AWS_SECURITY_TOKEN to AWS_SESSION_TOKEN Fixes #2687 --- website/source/docs/builders/amazon-ebs.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index f97404d19..1f2f39b96 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -177,7 +177,7 @@ builder. - `token` (string) - The access token to use. This is different from the access key and secret key. If you're not sure what this is, then you - probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN` + probably don't need it. This will also be read from the `AWS_SESSION_TOKEN` environmental variable. - `user_data` (string) - User data to apply when launching the instance. Note From 70083fc8697147abcfa312acacdf42eb0c7d7f25 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Fri, 7 Aug 2015 12:18:17 +1200 Subject: [PATCH 800/956] Add reboot checks before moving out of the reboot. --- provisioner/windows-restart/provisioner.go | 32 ++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go index 2e4b7c371..26736344e 100644 --- a/provisioner/windows-restart/provisioner.go +++ b/provisioner/windows-restart/provisioner.go @@ -16,6 +16,8 @@ import ( var DefaultRestartCommand = "powershell \"& {Restart-Computer -force }\"" var DefaultRestartCheckCommand = winrm.Powershell(`echo "${env:COMPUTERNAME} restarted."`) var retryableSleep = 5 * time.Second +var TryCheckReboot = "shutdown.exe -f -r -t 60" +var AbortReboot = "shutdown.exe -a" type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -94,16 +96,42 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Restart script exited with non-zero exit status: %d", cmd.ExitStatus) } - return waitForRestart(p) + return waitForRestart(p, comm) } -var waitForRestart = func(p *Provisioner) error { +var waitForRestart = func(p *Provisioner, comm packer.Communicator) error { ui := p.ui ui.Say("Waiting for machine to restart...") waitDone := make(chan bool, 1) timeout := time.After(p.config.RestartTimeout) var err error + p.comm = comm + var cmd *packer.RemoteCmd + trycommand := TryCheckReboot + abortcommand := AbortReboot + // Stolen from Vagrant reboot checker + for { + log.Printf("Check if machine is rebooting...") + cmd = &packer.RemoteCmd{Command: trycommand} + err = cmd.StartWithUi(comm, ui) + if err != nil { + // Couldnt execute, we asume machine is rebooting already + break + } + if cmd.ExitStatus != 0 { + // Reboot already in progress but not completed + log.Printf("Reboot already in progress, waiting...") + time.Sleep(10 * time.Second) + } + if cmd.ExitStatus == 0 { + // Cancel reboot we created to test if machine was already rebooting + cmd = &packer.RemoteCmd{Command: abortcommand} + cmd.StartWithUi(comm, ui) + break + } + } + go func() { log.Printf("Waiting for machine to become available...") err = waitForCommunicator(p) From 08359e409a0df8c3060dd56cfb1d0e4b5740f659 Mon Sep 17 00:00:00 2001 From: Gonzalo Peci Date: Wed, 19 Aug 2015 16:09:43 +1200 Subject: [PATCH 801/956] 
 Revert the shutdown command with new parameters, as this will return proper
 exit codes.

This will work in the same way as Restart-Computer -force when using
```shutdown /r /f /t 0```.

Note: The WinRM library does not currently return the exit code; this will be
implemented in https://github.com/masterzen/winrm/pull/26
---
 provisioner/windows-restart/provisioner.go      | 2 +-
 provisioner/windows-restart/provisioner_test.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go
index 26736344e..c5a683b73 100644
--- a/provisioner/windows-restart/provisioner.go
+++ b/provisioner/windows-restart/provisioner.go
@@ -13,7 +13,7 @@ import (
 	"github.com/mitchellh/packer/template/interpolate"
 )
 
-var DefaultRestartCommand = "powershell \"& {Restart-Computer -force }\""
+var DefaultRestartCommand = "shutdown /r /f /t 0 /c \"packer restart\""
 var DefaultRestartCheckCommand = winrm.Powershell(`echo "${env:COMPUTERNAME} restarted."`)
 var retryableSleep = 5 * time.Second
 var TryCheckReboot = "shutdown.exe -f -r -t 60"
diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go
index 247452c22..7a38e1579 100644
--- a/provisioner/windows-restart/provisioner_test.go
+++ b/provisioner/windows-restart/provisioner_test.go
@@ -35,7 +35,7 @@ func TestProvisionerPrepare_Defaults(t *testing.T) {
 		t.Errorf("unexpected remote path: %s", p.config.RestartTimeout)
 	}
 
-	if p.config.RestartCommand != "powershell \"& {Restart-Computer -force }\"" {
+	if p.config.RestartCommand != "shutdown /r /f /t 0 /c \"packer restart\"" {
 		t.Errorf("unexpected remote path: %s", p.config.RestartCommand)
 	}
 }

From 8bd3e62853b7b8f4875c7bdbbffab193a0877dfa Mon Sep 17 00:00:00 2001
From: Gonzalo Peci
Date: Wed, 19 Aug 2015 16:05:22 +1200
Subject: [PATCH 802/956] Because the new functionality makes the
 ```waitForRestart()``` function run commands, it modifies the value of
 ```comm.StartCmd.Command``` that is being checked, so we need to implement
 the same workaround that is being used for the ```waitForCommunicator()```
 function.

This should make the test work again and retain functionality.
---
 provisioner/windows-restart/provisioner_test.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go
index 7a38e1579..f91f5a771 100644
--- a/provisioner/windows-restart/provisioner_test.go
+++ b/provisioner/windows-restart/provisioner_test.go
@@ -100,6 +100,10 @@ func TestProvisionerProvision_Success(t *testing.T) {
 	waitForCommunicator = func(p *Provisioner) error {
 		return nil
 	}
+	waitForRestartOld := waitForRestart
+	waitForRestart = func(p *Provisioner, comm packer.Communicator) error {
+		return nil
+	}
 	err := p.Provision(ui, comm)
 	if err != nil {
 		t.Fatal("should not have error")
@@ -113,6 +117,7 @@
 	}
 
 	// Set this back!
 	waitForCommunicator = waitForCommunicatorOld
+	waitForRestart = waitForRestartOld
 }
 
 func TestProvisionerProvision_CustomCommand(t *testing.T) {
@@ -131,6 +136,10 @@ func TestProvisionerProvision_CustomCommand(t *testing.T) {
 	waitForCommunicator = func(p *Provisioner) error {
 		return nil
 	}
+	waitForRestartOld := waitForRestart
+	waitForRestart = func(p *Provisioner, comm packer.Communicator) error {
+		return nil
+	}
 	err := p.Provision(ui, comm)
 	if err != nil {
 		t.Fatal("should not have error")
@@ -142,6 +151,7 @@
 	}
 	// Set this back!
 	waitForCommunicator = waitForCommunicatorOld
+	waitForRestart = waitForRestartOld
 }
 
 func TestProvisionerProvision_RestartCommandFail(t *testing.T) {

From f01f62dc88e81c2133e898eaa86c8881e4b3c75b Mon Sep 17 00:00:00 2001
From: Gonzalo Peci
Date: Thu, 20 Aug 2015 09:00:38 +1200
Subject: [PATCH 803/956] Add specific exit codes listed on
 https://msdn.microsoft.com/en-us/library/windows/desktop/ms681383(v=vs.85).aspx

---
 provisioner/windows-restart/provisioner.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/provisioner/windows-restart/provisioner.go b/provisioner/windows-restart/provisioner.go
index c5a683b73..6b7bd90d3 100644
--- a/provisioner/windows-restart/provisioner.go
+++ b/provisioner/windows-restart/provisioner.go
@@ -119,7 +119,7 @@ var waitForRestart = func(p *Provisioner, comm packer.Communicator) error {
 		// Couldn't execute, we assume machine is rebooting already
 		break
 	}
-	if cmd.ExitStatus != 0 {
+	if cmd.ExitStatus == 1115 || cmd.ExitStatus == 1190 {
 		// Reboot already in progress but not completed
 		log.Printf("Reboot already in progress, waiting...")
 		time.Sleep(10 * time.Second)
 	}

From dbd1a13f0d53e3ee99bb66ef1735d55ba4f3d526 Mon Sep 17 00:00:00 2001
From: Jon Benson
Date: Fri, 4 Sep 2015 14:51:35 -0500
Subject: [PATCH 804/956] Document recommended artifact-types

---
 website/source/docs/post-processors/atlas.html.markdown | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown
index 435bec7c4..d803ad874 100644
--- a/website/source/docs/post-processors/atlas.html.markdown
+++ b/website/source/docs/post-processors/atlas.html.markdown
@@ -58,9 +58,10 @@ you can also use `token` configuration option.
   You must have access to the organization—hashicorp in this example—in order
   to add an artifact to the organization in Atlas.
 
-- `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will
-  always be `amazon.ami`. This field must be defined because Atlas can host
-  other artifact types, such as Vagrant boxes.
+- `artifact_type` (string) - For uploading artifacts to Atlas. `artifact_type`
+  can be set to any unique identifier, however, we recommend using the
+  following for consistency - `amazon.ami`, `vagrant.box`, `google.image`,
+  and `docker.image`.
### Optional: From 5e27f9353f44a33150edde0ea5ad7f7ae820f942 Mon Sep 17 00:00:00 2001 From: Jon Benson Date: Fri, 4 Sep 2015 15:01:35 -0500 Subject: [PATCH 805/956] Document recommended artifact_type --- website/source/docs/post-processors/atlas.html.markdown | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index d803ad874..81338b818 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -59,9 +59,8 @@ you can also use `token` configuration option. to add an artifact to the organization in Atlas. - `artifact_type` (string) - For uploading artifacts to Atlas. `artifact_type` - can be set to any unique identifier, however, we recommend using the - following for consistency - `amazon.ami`, `vagrant.box`, `google.image`, - and `docker.image`. + can be set to any unique identifier, however, the following are recommended + for consistency - `vagrant.box`, `amazon.ami`, `google.image`, and `docker.image`. ### Optional: From 673baa12d03b6ab337b3c4df3e7d394f5ed8fb08 Mon Sep 17 00:00:00 2001 From: Jon Benson Date: Fri, 4 Sep 2015 15:08:27 -0500 Subject: [PATCH 806/956] Add vmware and virtual box to artifact_types --- website/source/docs/post-processors/atlas.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 81338b818..72ef27af9 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -60,7 +60,8 @@ you can also use `token` configuration option. - `artifact_type` (string) - For uploading artifacts to Atlas. `artifact_type` can be set to any unique identifier, however, the following are recommended - for consistency - `vagrant.box`, `amazon.ami`, `google.image`, and `docker.image`. + for consistency - `vagrant.box`, `amazon.ami`, `google.image`, `docker.image`, + `vmware.image`, and `virtualbox.image`. ### Optional: From 4ebee7bf3f4fecf7b49018225e798de845b3dbe2 Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Sun, 21 Jun 2015 12:17:06 +0300 Subject: [PATCH 807/956] builder/parallels: Add "DiskPath" driver function This function determines path to the first virtual disk image of the specified virtual machine. 
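To make the lookup concrete, here is a minimal, runnable sketch of the parsing
this function performs, reusing the regular expression from the diff below.
The sample `prlctl list -i` output line is hypothetical and trimmed down to
the one field that matters; the real command prints many more lines.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical single line of `prlctl list -i <vm>` output; the real
	// output contains many more fields and devices.
	out := "hdd0 (+) sata:0:0 image='/vms/packer-test.pvm/harddisk.hdd' type='expanded'"

	// The same pattern the driver uses to pull the image path out of the
	// hdd0 line.
	hddRe := regexp.MustCompile("hdd0.* image='(.*)' type=*")
	matches := hddRe.FindStringSubmatch(out)
	if matches == nil {
		fmt.Println("could not determine hdd image path")
		return
	}
	fmt.Println(matches[1]) // prints /vms/packer-test.pvm/harddisk.hdd
}
```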
--- builder/parallels/common/driver.go | 3 +++ builder/parallels/common/driver_9.go | 17 +++++++++++++++++ builder/parallels/common/driver_mock.go | 11 +++++++++++ 3 files changed, 31 insertions(+) diff --git a/builder/parallels/common/driver.go b/builder/parallels/common/driver.go index 03a4e0f09..81b7cd974 100644 --- a/builder/parallels/common/driver.go +++ b/builder/parallels/common/driver.go @@ -18,6 +18,9 @@ type Driver interface { // Adds new CD/DVD drive to the VM and returns name of this device DeviceAddCdRom(string, string) (string, error) + // Get path to the first virtual disk image + DiskPath(string) (string, error) + // Import a VM Import(string, string, string, bool) error diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index c577151dc..1093351c8 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -121,6 +121,23 @@ func (d *Parallels9Driver) DeviceAddCdRom(name string, image string) (string, er return device_name, nil } +func (d *Parallels9Driver) DiskPath(name string) (string, error) { + out, err := exec.Command(d.PrlctlPath, "list", "-i", name).Output() + if err != nil { + return "", err + } + + hddRe := regexp.MustCompile("hdd0.* image='(.*)' type=*") + matches := hddRe.FindStringSubmatch(string(out)) + if matches == nil { + return "", fmt.Errorf( + "Could not determine hdd image path in the output:\n%s", string(out)) + } + + hdd_path := matches[1] + return hdd_path, nil +} + func (d *Parallels9Driver) IsRunning(name string) (bool, error) { var stdout bytes.Buffer diff --git a/builder/parallels/common/driver_mock.go b/builder/parallels/common/driver_mock.go index 5629a6db9..fc43247f5 100644 --- a/builder/parallels/common/driver_mock.go +++ b/builder/parallels/common/driver_mock.go @@ -11,6 +11,11 @@ type DriverMock struct { DeviceAddCdRomResult string DeviceAddCdRomErr error + DiskPathCalled bool + DiskPathName string + DiskPathResult string + DiskPathErr error + ImportCalled bool ImportName string ImportSrcPath string @@ -61,6 +66,12 @@ func (d *DriverMock) DeviceAddCdRom(name string, image string) (string, error) { return d.DeviceAddCdRomResult, d.DeviceAddCdRomErr } +func (d *DriverMock) DiskPath(name string) (string, error) { + d.DiskPathCalled = true + d.DiskPathName = name + return d.DiskPathResult, d.DiskPathErr +} + func (d *DriverMock) Import(name, srcPath, dstPath string, reassignMac bool) error { d.ImportCalled = true d.ImportName = name From 3fae902bc38088a4b51c9c965b79434eb9c06f06 Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 8 Sep 2015 10:40:23 +0000 Subject: [PATCH 808/956] enable discards only on qemu >= 2 older qemu versions does not have discard option, so not enable it Signed-off-by: Vasiliy Tolstov --- builder/qemu/driver.go | 7 ++++--- builder/qemu/step_run.go | 12 +++++++++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/builder/qemu/driver.go b/builder/qemu/driver.go index 5c402cecb..e93c87836 100644 --- a/builder/qemu/driver.go +++ b/builder/qemu/driver.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "github.com/mitchellh/multistep" "io" "log" "os/exec" @@ -14,6 +13,8 @@ import ( "syscall" "time" "unicode" + + "github.com/mitchellh/multistep" ) type DriverCancelCallback func(state multistep.StateBag) bool @@ -188,8 +189,8 @@ func (d *QemuDriver) Version() (string, error) { versionOutput := strings.TrimSpace(stdout.String()) log.Printf("Qemu --version output: %s", versionOutput) - versionRe := 
regexp.MustCompile("qemu-kvm-[0-9]\\.[0-9]") - matches := versionRe.Split(versionOutput, 2) + versionRe := regexp.MustCompile("[0-9]\\.[0-9]\\.[0-9]") + matches := versionRe.FindStringSubmatch(versionOutput) if len(matches) == 0 { return "", fmt.Errorf("No version found: %s", versionOutput) } diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 356481085..8226b8651 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "path/filepath" + "strconv" "strings" "github.com/mitchellh/multistep" @@ -62,6 +63,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error vncPort := state.Get("vnc_port").(uint) sshHostPort := state.Get("sshHostPort").(uint) ui := state.Get("ui").(packer.Ui) + driver := state.Get("driver").(Driver) vnc := fmt.Sprintf("0.0.0.0:%d", vncPort-5900) vmName := config.VMName @@ -82,7 +84,15 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error defaultArgs["-netdev"] = fmt.Sprintf( "user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.Comm.Port()) defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) - defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) + qemuVersion, err := driver.Version() + if err == nil { + parts := strings.Split(qemuVersion, ".") + if strconv.Atoi(parts[0]) >= 2 { + defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) + } + } else { + defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache) + } if !config.DiskImage { defaultArgs["-cdrom"] = isoPath } From dd286ff63c18f134ffd8bf07b41ffaba78e48868 Mon Sep 17 00:00:00 2001 From: Jon Benson Date: Tue, 8 Sep 2015 12:51:24 -0500 Subject: [PATCH 809/956] Add artifact_types --- website/source/docs/post-processors/atlas.html.markdown | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 72ef27af9..8272ce159 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -60,8 +60,9 @@ you can also use `token` configuration option. - `artifact_type` (string) - For uploading artifacts to Atlas. `artifact_type` can be set to any unique identifier, however, the following are recommended - for consistency - `vagrant.box`, `amazon.ami`, `google.image`, `docker.image`, - `vmware.image`, and `virtualbox.image`. + for consistency - `amazon.image`, `digitalocean.image`, `docker.image`, + `googlecompute.image`, `openstack.image`, `parallels.image`, `qemu.image`, + `virtualbox.image`, `vmware.image`, `custom.image`, and `vagrant.box`. ### Optional: From f7b26e44fe747879e942e497efef78c20c45ce71 Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Sun, 21 Jun 2015 13:34:35 +0300 Subject: [PATCH 810/956] builder/parallels: Add "CompactDisk" driver function This function compacts the specified virtual disk image. 
--- builder/parallels/common/driver.go | 3 +++ builder/parallels/common/driver_9.go | 27 +++++++++++++++++++++++++ builder/parallels/common/driver_mock.go | 10 +++++++++ 3 files changed, 40 insertions(+) diff --git a/builder/parallels/common/driver.go b/builder/parallels/common/driver.go index 81b7cd974..e6a5b7bb1 100644 --- a/builder/parallels/common/driver.go +++ b/builder/parallels/common/driver.go @@ -15,6 +15,9 @@ import ( // versions out of the builder steps, so sometimes the methods are // extremely specific. type Driver interface { + // Compact a virtual disk image. + CompactDisk(string) error + // Adds new CD/DVD drive to the VM and returns name of this device DeviceAddCdRom(string, string) (string, error) diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index 1093351c8..2c7f2fc2f 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -98,6 +98,33 @@ func getAppPath(bundleId string) (string, error) { return pathOutput, nil } +func (d *Parallels9Driver) CompactDisk(diskPath string) error { + prlDiskToolPath, err := exec.LookPath("prl_disk_tool") + if err != nil { + return err + } + + // Analyze the disk content and remove unused blocks + command := []string{ + "compact", + "--hdd", diskPath, + } + if err := exec.Command(prlDiskToolPath, command...).Run(); err != nil { + return err + } + + // Remove null blocks + command = []string{ + "compact", "--buildmap", + "--hdd", diskPath, + } + if err := exec.Command(prlDiskToolPath, command...).Run(); err != nil { + return err + } + + return nil +} + func (d *Parallels9Driver) DeviceAddCdRom(name string, image string) (string, error) { command := []string{ "set", name, diff --git a/builder/parallels/common/driver_mock.go b/builder/parallels/common/driver_mock.go index fc43247f5..fcd6b4b88 100644 --- a/builder/parallels/common/driver_mock.go +++ b/builder/parallels/common/driver_mock.go @@ -5,6 +5,10 @@ import "sync" type DriverMock struct { sync.Mutex + CompactDiskCalled bool + CompactDiskPath string + CompactDiskErr error + DeviceAddCdRomCalled bool DeviceAddCdRomName string DeviceAddCdRomImage string @@ -59,6 +63,12 @@ type DriverMock struct { IpAddressError error } +func (d *DriverMock) CompactDisk(path string) error { + d.CompactDiskCalled = true + d.CompactDiskPath = path + return d.CompactDiskErr +} + func (d *DriverMock) DeviceAddCdRom(name string, image string) (string, error) { d.DeviceAddCdRomCalled = true d.DeviceAddCdRomName = name From abfe706fc693f80c5bfe450bf4e9d860d69bb1d3 Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Sun, 21 Jun 2015 13:35:52 +0300 Subject: [PATCH 811/956] builder/parallels: Add "StepCompactDisk" --- builder/parallels/common/step_compact_disk.go | 51 +++++++++++++ .../common/step_compact_disk_test.go | 73 +++++++++++++++++++ builder/parallels/iso/builder.go | 4 + 3 files changed, 128 insertions(+) create mode 100644 builder/parallels/common/step_compact_disk.go create mode 100644 builder/parallels/common/step_compact_disk_test.go diff --git a/builder/parallels/common/step_compact_disk.go b/builder/parallels/common/step_compact_disk.go new file mode 100644 index 000000000..0ebc7a134 --- /dev/null +++ b/builder/parallels/common/step_compact_disk.go @@ -0,0 +1,51 @@ +package common + +import ( + "fmt" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" +) + +// This step removes all empty blocks from expanding Parallels virtual disks +// and reduces the result disk size +// +// Uses: +// driver Driver 
+// vmName string +// ui packer.Ui +// +// Produces: +// +type StepCompactDisk struct { + Skip bool +} + +func (s *StepCompactDisk) Run(state multistep.StateBag) multistep.StepAction { + driver := state.Get("driver").(Driver) + vmName := state.Get("vmName").(string) + ui := state.Get("ui").(packer.Ui) + + if s.Skip { + ui.Say("Skipping disk compaction step...") + return multistep.ActionContinue + } + + ui.Say("Compacting the disk image") + diskPath, err := driver.DiskPath(vmName) + if err != nil { + err := fmt.Errorf("Error detecting virtual disk path: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if err := driver.CompactDisk(diskPath); err != nil { + state.Put("error", fmt.Errorf("Error compacting disk: %s", err)) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (*StepCompactDisk) Cleanup(multistep.StateBag) {} diff --git a/builder/parallels/common/step_compact_disk_test.go b/builder/parallels/common/step_compact_disk_test.go new file mode 100644 index 000000000..ace932a2d --- /dev/null +++ b/builder/parallels/common/step_compact_disk_test.go @@ -0,0 +1,73 @@ +package common + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/mitchellh/multistep" +) + +func TestStepCompactDisk_impl(t *testing.T) { + var _ multistep.Step = new(StepCompactDisk) +} + +func TestStepCompactDisk(t *testing.T) { + tf, err := ioutil.TempFile("", "packer") + if err != nil { + t.Fatalf("err: %s", err) + } + tf.Close() + defer os.Remove(tf.Name()) + + state := testState(t) + step := new(StepCompactDisk) + + state.Put("vmName", "foo") + + driver := state.Get("driver").(*DriverMock) + + // Mock results + driver.DiskPathResult = tf.Name() + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Test the driver + if !driver.CompactDiskCalled { + t.Fatal("should've called") + } + + path, _ := driver.DiskPath("foo") + if path != tf.Name() { + t.Fatal("should call with right path") + } +} + +func TestStepCompactDisk_skip(t *testing.T) { + state := testState(t) + step := new(StepCompactDisk) + step.Skip = true + + state.Put("vmName", "foo") + + driver := state.Get("driver").(*DriverMock) + + // Test the run + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Test the driver + if driver.CompactDiskCalled { + t.Fatal("should not have called") + } +} diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 6b731544d..bfc7c23c4 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -45,6 +45,7 @@ type Config struct { ISOChecksum string `mapstructure:"iso_checksum"` ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOUrls []string `mapstructure:"iso_urls"` + SkipCompaction bool `mapstructure:"skip_compaction"` VMName string `mapstructure:"vm_name"` RawSingleISOUrl string `mapstructure:"iso_url"` @@ -272,6 +273,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Commands: b.config.PrlctlPost, Ctx: b.config.ctx, }, + ¶llelscommon.StepCompactDisk{ + Skip: b.config.SkipCompaction, + }, } // Setup the state bag From 36a6fc2cc450d35e0ab41132fe50a1e3f0dfe54c Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Fri, 
11 Sep 2015 16:13:39 +0300 Subject: [PATCH 812/956] website/docs: Add description of "skip_compaction" option for Parallels builders --- website/source/docs/builders/parallels-iso.html.markdown | 5 +++++ website/source/docs/builders/parallels-pvm.html.markdown | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index 76278ec2b..4200adb73 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -196,6 +196,11 @@ builder. doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. +- `skip_compaction` (boolean) - Virtual disk image is compacted at the end of + the build process using `prl_disk_tool` utility. In certain rare cases, this + might corrupt the resulting disk image. If you find this to be the case, + you can disable compaction using this configuration value. + - `vm_name` (string) - This is the name of the PVM directory for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. diff --git a/website/source/docs/builders/parallels-pvm.html.markdown b/website/source/docs/builders/parallels-pvm.html.markdown index ce13f2c19..2c81ecd44 100644 --- a/website/source/docs/builders/parallels-pvm.html.markdown +++ b/website/source/docs/builders/parallels-pvm.html.markdown @@ -142,6 +142,11 @@ builder. doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. +- `skip_compaction` (boolean) - Virtual disk image is compacted at the end of + the build process using `prl_disk_tool` utility. In certain rare cases, this + might corrupt the resulting disk image. If you find this to be the case, + you can disable compaction using this configuration value. + - `vm_name` (string) - This is the name of the virtual machine when it is imported as well as the name of the PVM directory when the virtual machine is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the From 4cd70a24671258243f002235f3f0260e8562e379 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Fri, 11 Sep 2015 09:42:58 -0400 Subject: [PATCH 813/956] Fix Atlas /operations link --- .../source/intro/getting-started/remote-builds.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index 6ddb4ece3..5dba242c4 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -72,8 +72,8 @@ Replace "ATLAS\_USERNAME" with your username, then run automatically starts the build. This build will fail since neither `aws_access_key` or `aws_secret_key` are set -in the Atlas environment. To set environment variables in Atlas, navigate to the -[operations tab](https://atlas.hashicorp.com/operations), click the +in the Atlas environment. To set environment variables in Atlas, navigate to +the [Builds tab](https://atlas.hashicorp.com/builds), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` with their respective values. 
Now restart the Packer build by either clicking From 48f8612b72163e1ef719dc491b001310cec5f490 Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Tue, 8 Sep 2015 10:58:21 +0000 Subject: [PATCH 814/956] add virtio-scsi Signed-off-by: Vasiliy Tolstov --- builder/qemu/builder.go | 7 +++-- builder/qemu/driver.go | 2 +- builder/qemu/step_run.go | 62 ++++++++++++++++++++++++++++------------ 3 files changed, 49 insertions(+), 22 deletions(-) diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 4ac22b59b..016515617 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -54,9 +54,10 @@ var netDevice = map[string]bool{ } var diskInterface = map[string]bool{ - "ide": true, - "scsi": true, - "virtio": true, + "ide": true, + "scsi": true, + "virtio": true, + "virtio-scsi": true, } var diskCache = map[string]bool{ diff --git a/builder/qemu/driver.go b/builder/qemu/driver.go index e93c87836..77c68737d 100644 --- a/builder/qemu/driver.go +++ b/builder/qemu/driver.go @@ -189,7 +189,7 @@ func (d *QemuDriver) Version() (string, error) { versionOutput := strings.TrimSpace(stdout.String()) log.Printf("Qemu --version output: %s", versionOutput) - versionRe := regexp.MustCompile("[0-9]\\.[0-9]\\.[0-9]") + versionRe := regexp.MustCompile("[\\.[0-9]+]*") matches := versionRe.FindStringSubmatch(versionOutput) if len(matches) == 0 { return "", fmt.Errorf("No version found: %s", versionOutput) diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 8226b8651..74db88a15 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -69,30 +69,51 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error vmName := config.VMName imgPath := filepath.Join(config.OutputDir, vmName) - defaultArgs := make(map[string]string) + defaultArgs := make(map[string]interface{}) + var deviceArgs []string + var driveArgs []string + + defaultArgs["-name"] = vmName + defaultArgs["-machine"] = fmt.Sprintf("type=%s", config.MachineType) + defaultArgs["-netdev"] = fmt.Sprintf("user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.Comm.Port()) + + qemuVersion, err := driver.Version() + if err != nil { + return nil, err + } + parts := strings.Split(qemuVersion, ".") + qemuMajor, err := strconv.Atoi(parts[0]) + if err != nil { + return nil, err + } + if qemuMajor >= 2 { + if config.DiskInterface == "virtio-scsi" { + deviceArgs = append(deviceArgs, "virtio-scsi-pci,id=scsi0", "scsi-hd,bus=scsi0.0,drive=drive0") + driveArgs = append(driveArgs, fmt.Sprintf("if=none,file=%s,id=drive0,cache=%s,discard=%s", imgPath, config.DiskCache, config.DiskDiscard)) + } else { + driveArgs = append(driveArgs, fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard)) + } + } else { + defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache) + } + deviceArgs = append(deviceArgs, fmt.Sprintf("%s,netdev=user.0", config.NetDevice)) if config.Headless == true { ui.Message("WARNING: The VM will be started in headless mode, as configured.\n" + "In headless mode, errors during the boot sequence or OS setup\n" + "won't be easily visible. 
Use at your own discretion.") } else { - defaultArgs["-display"] = "sdl" + if qemuMajor >= 2 { + defaultArgs["-display"] = "sdl" + } else { + ui.Message("WARNING: The version of qemu on your host doesn't support display mode.\n" + + "The display parameter will be ignored.") + } } - defaultArgs["-name"] = vmName - defaultArgs["-machine"] = fmt.Sprintf("type=%s", config.MachineType) - defaultArgs["-netdev"] = fmt.Sprintf( - "user,id=user.0,hostfwd=tcp::%v-:%d", sshHostPort, config.Comm.Port()) - defaultArgs["-device"] = fmt.Sprintf("%s,netdev=user.0", config.NetDevice) - qemuVersion, err := driver.Version() - if err == nil { - parts := strings.Split(qemuVersion, ".") - if strconv.Atoi(parts[0]) >= 2 { - defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s,discard=%s", imgPath, config.DiskInterface, config.DiskCache, config.DiskDiscard) - } - } else { - defaultArgs["-drive"] = fmt.Sprintf("file=%s,if=%s,cache=%s", imgPath, config.DiskInterface, config.DiskCache) - } + defaultArgs["-device"] = deviceArgs + defaultArgs["-drive"] = driveArgs + if !config.DiskImage { defaultArgs["-cdrom"] = isoPath } @@ -102,7 +123,7 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error // Append the accelerator to the machine type if it is specified if config.Accelerator != "none" { - defaultArgs["-machine"] += fmt.Sprintf(",accel=%s", config.Accelerator) + defaultArgs["-machine"] = fmt.Sprintf("%s,accel=%s", defaultArgs["-machine"], config.Accelerator) } else { ui.Message("WARNING: The VM will be started with no hardware acceleration.\n" + "The installation may take considerably longer to finish.\n") @@ -152,7 +173,12 @@ func getCommandArgs(bootDrive string, state multistep.StateBag) ([]string, error for key := range defaultArgs { if _, ok := inArgs[key]; !ok { arg := make([]string, 1) - arg[0] = defaultArgs[key] + switch defaultArgs[key].(type) { + case string: + arg[0] = defaultArgs[key].(string) + case []string: + arg = defaultArgs[key].([]string) + } inArgs[key] = arg } } From 7def47f376c6355f76d1ef74362e48e21d35d669 Mon Sep 17 00:00:00 2001 From: Jesse Callaway Date: Tue, 15 Sep 2015 10:33:44 -0400 Subject: [PATCH 815/956] indent ami_block_device_mappings subkeys --- .../docs/builders/amazon-ebs.html.markdown | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index f97404d19..bbd64b510 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -68,27 +68,27 @@ builder. - `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: -- `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") -- `virtual_name` (string) - The virtual device name. See the documentation on - [Block Device - Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) - for more information -- `snapshot_id` (string) - The ID of the snapshot -- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - volumes -- `volume_size` (integer) - The size of the volume, in GiB. 
Required if not - specifying a `snapshot_id` -- `delete_on_termination` (boolean) - Indicates whether the EBS volume is - deleted on instance termination -- `encrypted` (boolean) - Indicates whether to encrypt the volume or not -- `no_device` (boolean) - Suppresses the specified device included in the - block device mapping of the AMI -- `iops` (integer) - The number of I/O operations per second (IOPS) that the - volume supports. See the documentation on - [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) - for more information + - `device_name` (string) - The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) - The virtual device name. See the documentation on + [Block Device + Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) + for more information + - `snapshot_id` (string) - The ID of the snapshot + - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) - The size of the volume, in GiB. Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) - Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) - Indicates whether to encrypt the volume or not + - `no_device` (boolean) - Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) - The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on + [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) + for more information - `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. From 9cf99289c5729f49674a69a8978594c7a5e79a4b Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Sun, 20 Sep 2015 10:14:07 +0200 Subject: [PATCH 816/956] Fixed GCE builder after dependency change. See https://github.com/google/google-api-go-client/commit/4af91da60108e4a6d2547bb4adce3f611c4d1f20 --- builder/googlecompute/driver_gce.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go index f52ee6321..d7b745d58 100644 --- a/builder/googlecompute/driver_gce.go +++ b/builder/googlecompute/driver_gce.go @@ -219,7 +219,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) { for k, v := range c.Metadata { metadata = append(metadata, &compute.MetadataItems{ Key: k, - Value: v, + Value: &v, }) } From 569121519dee6977d6a855eb09ebc7012fa520c2 Mon Sep 17 00:00:00 2001 From: Eran Chetz Date: Mon, 21 Sep 2015 16:32:18 +0300 Subject: [PATCH 817/956] minor change that was majorly bugging me --- website/source/intro/getting-started/build-image.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/build-image.html.markdown b/website/source/intro/getting-started/build-image.html.markdown index ec1d851a9..c66a604c7 100644 --- a/website/source/intro/getting-started/build-image.html.markdown +++ b/website/source/intro/getting-started/build-image.html.markdown @@ -165,7 +165,7 @@ storing images at the end of this getting started guide. After running the above example, your AWS account now has an AMI associated with it. 
AMIs are stored in S3 by Amazon, so unless you want to be charged about -\$0.01 per month, you'll probably want to remove it. Remove the AMI by first +$0.01 per month, you'll probably want to remove it. Remove the AMI by first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). Next, delete the associated snapshot on the [AWS snapshot management From 6bf790a9754f4e8688c612fc3680cbe2cd47d7d0 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Fri, 25 Sep 2015 11:52:21 -0700 Subject: [PATCH 818/956] post-processor/atlas: support sending compile ids Requires https://github.com/hashicorp/atlas-go/pull/44 --- post-processor/atlas/post-processor.go | 20 ++++++++++++++++++-- post-processor/atlas/post-processor_test.go | 14 ++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index 59335086c..7b8a4d98b 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -15,7 +15,10 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) -const BuildEnvKey = "ATLAS_BUILD_ID" +const ( + BuildEnvKey = "ATLAS_BUILD_ID" + CompileEnvKey = "ATLAS_COMPILE_ID" +) // Artifacts can return a string for this state key and the post-processor // will use automatically use this as the type. The user's value overrides @@ -43,7 +46,8 @@ type Config struct { ctx interpolate.Context user, name string - buildId int + buildId int + compileId int } type PostProcessor struct { @@ -96,6 +100,17 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.buildId = int(raw) } + // If we have a compile ID, save it + if v := os.Getenv(CompileEnvKey); v != "" { + raw, err := strconv.ParseInt(v, 0, 0) + if err != nil { + return fmt.Errorf( + "Error parsing compile ID: %s", err) + } + + p.config.compileId = int(raw) + } + // Build the client p.client = atlas.DefaultClient() if p.config.ServerAddr != "" { @@ -150,6 +165,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ID: artifact.Id(), Metadata: p.metadata(artifact), BuildID: p.config.buildId, + CompileID: p.config.compileId, } if fs := artifact.Files(); len(fs) > 0 { diff --git a/post-processor/atlas/post-processor_test.go b/post-processor/atlas/post-processor_test.go index 2f8c63c47..add222bb6 100644 --- a/post-processor/atlas/post-processor_test.go +++ b/post-processor/atlas/post-processor_test.go @@ -40,6 +40,20 @@ func TestPostProcessorConfigure_buildId(t *testing.T) { } } +func TestPostProcessorConfigure_compileId(t *testing.T) { + defer os.Setenv(CompileEnvKey, os.Getenv(CompileEnvKey)) + os.Setenv(CompileEnvKey, "5") + + var p PostProcessor + if err := p.Configure(validDefaults()); err != nil { + t.Fatalf("err: %s", err) + } + + if p.config.compileId != 5 { + t.Fatalf("bad: %#v", p.config.compileId) + } +} + func TestPostProcessorMetadata(t *testing.T) { var p PostProcessor if err := p.Configure(validDefaults()); err != nil { From 8ab4a5f3a53ef323ab5cb3dce5cac77a6655e6c4 Mon Sep 17 00:00:00 2001 From: Jarl Friis Date: Sat, 26 Sep 2015 13:31:31 +0200 Subject: [PATCH 819/956] Makefile fix --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1bd54cac0..f378bcea6 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ updatedeps: | grep -v github.com/mitchellh/packer \ | grep -v '/internal/' \ | sort -u \ - | xargs go get -f -u -v -d ; if [ $$? 
-eq 0 ]; then \ + | xargs go get -f -u -v -d ; if [ $$? -ne 0 ]; then \ echo "ERROR: go get failed. Your git branch may have changed; you were on $(GITBRANCH) ($(GITSHA))."; \ fi @if [ "$(GITBRANCH)" != "" ]; then git checkout -q $(GITBRANCH); else git checkout -q $(GITSHA); fi From 87f2f7c96da01a81759b387a77551cf2af4ac18a Mon Sep 17 00:00:00 2001 From: Robin Kearney Date: Wed, 30 Sep 2015 13:13:46 +0100 Subject: [PATCH 820/956] Confusing to specify this twice --- website/source/docs/builders/qemu.html.markdown | 1 - 1 file changed, 1 deletion(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 9288bbcab..106853755 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -35,7 +35,6 @@ to files, URLS for ISOs and checksums. "iso_checksum": "0d9dc37b5dd4befa1c440d2174e88a87", "iso_checksum_type": "md5", "output_directory": "output_centos_tdhtest", - "ssh_wait_timeout": "30s", "shutdown_command": "shutdown -P now", "disk_size": 5000, "format": "qcow2", From c48e2319abcd55158185ed3b36d61d8c33892bbd Mon Sep 17 00:00:00 2001 From: dragon788 Date: Wed, 30 Sep 2015 20:40:58 -0500 Subject: [PATCH 821/956] Added correct format for compression_level --- website/source/docs/post-processors/compress.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/post-processors/compress.html.markdown b/website/source/docs/post-processors/compress.html.markdown index 373230d44..1b36774fc 100644 --- a/website/source/docs/post-processors/compress.html.markdown +++ b/website/source/docs/post-processors/compress.html.markdown @@ -66,6 +66,6 @@ configuration: { "type": "compress", "output": "log_{{.BuildName}}.gz", - "compression": 9 + "compression_level": 9 } ``` From 2411c779d1d66c8bd091dd77f400e7e12a9f1919 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Thu, 1 Oct 2015 22:52:06 -0700 Subject: [PATCH 822/956] Make qemu ssh_wait_timeout example more reasonable --- website/source/docs/builders/qemu.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 106853755..3f3080ccc 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -48,7 +48,7 @@ to files, URLS for ISOs and checksums. "ssh_username": "root", "ssh_password": "s0m3password", "ssh_port": 22, - "ssh_wait_timeout": "90m", + "ssh_wait_timeout": "30s", "vm_name": "tdhtest", "net_device": "virtio-net", "disk_interface": "virtio", From b60e1fd86236efb991e2503c0b6a7e7f796547c4 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 2 Oct 2015 17:06:33 -0400 Subject: [PATCH 823/956] Add suggestion to AWS Instance builder error message A common cause of the failure of the bundle volume step is missing the ec2-bundle-vol command on the target instance. This commit adds a note about this to the error message produced as Packer output if the bundling step fails (it is already in the documentation). 
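As a toy reproduction of what the improved failure message looks like — the
exit status below is made up, and the real step (in the diff that follows)
puts the error into the state bag and halts the build rather than printing it:

```go
package main

import "fmt"

func main() {
	exitStatus := 2 // hypothetical non-zero exit from ec2-bundle-vol

	if exitStatus != 0 {
		err := fmt.Errorf(
			"Volume bundling failed. Please see the output above for more\n" +
				"details on what went wrong.\n\n" +
				"One common cause for this error is ec2-bundle-vol not being\n" +
				"available on the target instance.")
		fmt.Println(err)
	}
}
```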
--- builder/amazon/instance/step_bundle_volume.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builder/amazon/instance/step_bundle_volume.go b/builder/amazon/instance/step_bundle_volume.go index bd362f91f..5d839177f 100644 --- a/builder/amazon/instance/step_bundle_volume.go +++ b/builder/amazon/instance/step_bundle_volume.go @@ -67,7 +67,9 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { if cmd.ExitStatus != 0 { state.Put("error", fmt.Errorf( "Volume bundling failed. Please see the output above for more\n"+ - "details on what went wrong.")) + "details on what went wrong.\n\n"+ + "One common cause for this error is ec2-bundle-vol not being\n"+ + "available on the target instance.")) ui.Error(state.Get("error").(error).Error()) return multistep.ActionHalt } From 647a07225753f0f974929dbc27919589d4c4394c Mon Sep 17 00:00:00 2001 From: Chris Becker Date: Mon, 5 Oct 2015 13:19:21 -0400 Subject: [PATCH 824/956] Fix template error on ansible provisioner documentation --- .../provisioners/ansible-local.html.markdown | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index 7fd084c0a..aa2641db2 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -51,7 +51,12 @@ Optional: packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` will generate an Ansible inventory like: -`{.text} [my_group_1] 127.0.0.1 [my_group_2] 127.0.0.1` +```{.text} +[my_group_1] +127.0.0.1 +[my_group_2] +127.0.0.1 +``` - `inventory_file` (string) - The inventory file to be used by ansible. This file must exist on your local system and will be uploaded to the @@ -63,17 +68,25 @@ specified host you're buiding. The `--limit` argument can be provided in the An example inventory file may look like: -\`\`\` {.text} \[chi-dbservers\] db-01 ansible\_connection=local db-02 -ansible\_connection=local +```{.text} +[chi-dbservers] +db-01 ansible_connection=local +db-02 ansible_connection=local -\[chi-appservers\] app-01 ansible\_connection=local app-02 -ansible\_connection=local +[chi-appservers] +app-01 ansible_connection=local +app-02 ansible_connection=local -\[chi:children\] chi-dbservers chi-appservers +[chi:children] +chi-dbservers +chi-appservers -\[dbservers:children\] chi-dbservers +[dbservers:children] +chi-dbservers -\[appservers:children\] chi-appservers \`\`\` +[appservers:children] +chi-appservers +``` - `playbook_dir` (string) - a path to the complete ansible directory structure on your local system to be copied to the remote machine as the From 3495204382f8973f636d8f92c4beab61e8b27d9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Fievet?= <_@sebastien-fievet.fr> Date: Tue, 6 Oct 2015 15:04:04 +0200 Subject: [PATCH 825/956] Fix ansible inventory example --- .../provisioners/ansible-local.html.markdown | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown index 7fd084c0a..00ec690c0 100644 --- a/website/source/docs/provisioners/ansible-local.html.markdown +++ b/website/source/docs/provisioners/ansible-local.html.markdown @@ -62,18 +62,25 @@ specified host you're buiding. The `--limit` argument can be provided in the `extra_arguments` option. 
An example inventory file may look like: +``` +[chi-dbservers] +db-01 ansible_connection=local +db-02 ansible_connection=local -\`\`\` {.text} \[chi-dbservers\] db-01 ansible\_connection=local db-02 -ansible\_connection=local +[chi-appservers] +app-01 ansible_connection=local +app-02 ansible_connection=local -\[chi-appservers\] app-01 ansible\_connection=local app-02 -ansible\_connection=local +[chi:children] +chi-dbservers +chi-appservers -\[chi:children\] chi-dbservers chi-appservers +[dbservers:children] +chi-dbservers -\[dbservers:children\] chi-dbservers - -\[appservers:children\] chi-appservers \`\`\` +[appservers:children] +chi-appservers +``` - `playbook_dir` (string) - a path to the complete ansible directory structure on your local system to be copied to the remote machine as the From 8349e22df950ee9e94be84d9707a2a8cccd245c5 Mon Sep 17 00:00:00 2001 From: Adrian Bridgett Date: Tue, 6 Oct 2015 21:36:21 +0100 Subject: [PATCH 826/956] Add ebs_optimized support --- builder/amazon/common/run_config.go | 1 + builder/amazon/common/step_run_source_instance.go | 2 ++ builder/amazon/ebs/builder.go | 1 + builder/amazon/instance/builder.go | 1 + website/source/docs/builders/amazon-ebs.html.markdown | 4 ++++ website/source/docs/builders/amazon-instance.html.markdown | 4 ++++ 6 files changed, 13 insertions(+) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index cc981596d..307a177ed 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -16,6 +16,7 @@ import ( type RunConfig struct { AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"` AvailabilityZone string `mapstructure:"availability_zone"` + EbsOptimized bool `mapstructure:"ebs_optimized"` IamInstanceProfile string `mapstructure:"iam_instance_profile"` InstanceType string `mapstructure:"instance_type"` RunTags map[string]string `mapstructure:"run_tags"` diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index be4120c19..a74d7c138 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -20,6 +20,7 @@ type StepRunSourceInstance struct { AvailabilityZone string BlockDevices BlockDevices Debug bool + EbsOptimized bool ExpectedRootDevice string InstanceType string IamInstanceProfile string @@ -146,6 +147,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile}, BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(), Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone}, + EbsOptimized: &s.EbsOptimized, } if s.SubnetId != "" && s.AssociatePublicIpAddress { diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 1eab06247..26a525bcb 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -114,6 +114,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe IamInstanceProfile: b.config.IamInstanceProfile, SubnetId: b.config.SubnetId, AssociatePublicIpAddress: b.config.AssociatePublicIpAddress, + EbsOptimized: b.config.EbsOptimized, AvailabilityZone: b.config.AvailabilityZone, BlockDevices: b.config.BlockDevices, Tags: b.config.RunTags, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index b69fc40ce..6ca908230 100644 --- a/builder/amazon/instance/builder.go +++ 
b/builder/amazon/instance/builder.go
@@ -201,6 +201,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		SourceAMI:                b.config.SourceAmi,
 		SubnetId:                 b.config.SubnetId,
 		AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
+		EbsOptimized:             b.config.EbsOptimized,
 		AvailabilityZone:         b.config.AvailabilityZone,
 		BlockDevices:             b.config.BlockDevices,
 		Tags:                     b.config.RunTags,
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index f97404d19..e7fd99ab2 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -116,6 +116,10 @@ builder.
 -   `availability_zone` (string) - Destination availability zone to launch
     instance in. Leave this empty to allow Amazon to auto-assign.
 
+-   `ebs_optimized` (boolean) - Mark instance as [EBS
+    Optimized](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+    Default `false`.
+
 -   `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
     HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
 
diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown
index 13ab1f293..378421214 100644
--- a/website/source/docs/builders/amazon-instance.html.markdown
+++ b/website/source/docs/builders/amazon-instance.html.markdown
@@ -157,6 +157,10 @@ builder.
 -   `bundle_vol_command` (string) - The command to use to bundle the volume.
     See the "custom bundle commands" section below for more information.
 
+-   `ebs_optimized` (boolean) - Mark instance as [EBS
+    Optimized](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+    Default `false`.
+
 -   `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on
     HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy.
 
From da771c469ead683e64fd31241cb59d35c1846969 Mon Sep 17 00:00:00 2001
From: Adrian Bridgett
Date: Tue, 6 Oct 2015 22:13:17 +0100
Subject: [PATCH 827/956] spot_price of "0" to mean on demand

This helps avoid duplicating Packer templates just to choose between spot
and on-demand instances.
---
 builder/amazon/common/step_run_source_instance.go          | 2 +-
 website/source/docs/builders/amazon-ebs.html.markdown      | 3 ++-
 website/source/docs/builders/amazon-instance.html.markdown | 3 ++-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go
index be4120c19..ae8960268 100644
--- a/builder/amazon/common/step_run_source_instance.go
+++ b/builder/amazon/common/step_run_source_instance.go
@@ -135,7 +135,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 
 	var instanceId string
 
-	if spotPrice == "" {
+	if spotPrice == "" || spotPrice == "0" {
 		runOpts := &ec2.RunInstancesInput{
 			KeyName: &keyName,
 			ImageId: &s.SourceAMI,
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index f97404d19..5b48b8a67 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -150,7 +150,8 @@ builder.
     when the current spot price is less than the maximum price you specify.
     Spot price will be updated based on available spot instance capacity and
     current spot instance requests. It may save you some costs. You can set this to
-    "auto" for Packer to automatically discover the best spot price.
+    "auto" for Packer to automatically discover the best spot price or to "0"
+    to use an on-demand instance (default).
 
 -   `spot_price_auto_product` (string) - Required if `spot_price` is set to
     "auto". This tells Packer what sort of AMI you're launching to find the
diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown
index 13ab1f293..726a5c4f4 100644
--- a/website/source/docs/builders/amazon-instance.html.markdown
+++ b/website/source/docs/builders/amazon-instance.html.markdown
@@ -191,7 +191,8 @@ builder.
     maximum price that you specify exceeds the current spot price. Spot price
     will be updated based on available spot instance capacity and current spot
     Instance requests. It may save you some costs. You can set this to "auto"
-    for Packer to automatically discover the best spot price.
+    for Packer to automatically discover the best spot price or to "0" to use
+    an on-demand instance (default).
 
 -   `spot_price_auto_product` (string) - Required if `spot_price` is set to
     "auto". This tells Packer what sort of AMI you're launching to find the
From 5ef142c8bd32404f328ad428e46c4c8c3512918e Mon Sep 17 00:00:00 2001
From: Mark Peek
Date: Tue, 6 Oct 2015 16:45:30 -0700
Subject: [PATCH 828/956] Fixes #2777: isotime interpolation uses InitTime

---
 template/interpolate/funcs.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/template/interpolate/funcs.go b/template/interpolate/funcs.go
index 75e1344ed..92c3ce5c4 100644
--- a/template/interpolate/funcs.go
+++ b/template/interpolate/funcs.go
@@ -93,14 +93,14 @@ func funcGenEnv(ctx *Context) interface{} {
 func funcGenIsotime(ctx *Context) interface{} {
 	return func(format ...string) (string, error) {
 		if len(format) == 0 {
-			return time.Now().UTC().Format(time.RFC3339), nil
+			return InitTime.Format(time.RFC3339), nil
 		}
 
 		if len(format) > 1 {
 			return "", fmt.Errorf("too many values, 1 needed: %v", format)
 		}
 
-		return time.Now().UTC().Format(format[0]), nil
+		return InitTime.Format(format[0]), nil
 	}
 }
From 77bfd1de2e46e8505b42870dbbfbd57e72bec649 Mon Sep 17 00:00:00 2001
From: Mark Peek
Date: Tue, 6 Oct 2015 15:59:44 -0700
Subject: [PATCH 829/956] Workaround docker-machine shared folder mapping issue

When using docker-machine on a Mac, only the /Users folder is shared with
the VM. Uploads fail since the normal tmpdir is not shared. This change
uses the local packer working directory (usually somewhere under the
user's home folder) so uploads work without setting TMPDIR explicitly.

A better fix would be to use the docker API directly, but that would force
users to use docker API version 20+.

- fixes #901, fixes #1752, fixes #2436, fixes #2675, fixes #2697.
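In sketch form, the selection logic looks roughly like this (the
`tempDirBase` helper name is illustrative, not part of the change):

``` {.go}
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// tempDirBase (an illustrative name) picks the base directory for the
// shared temp dir: honor TMPDIR when the user set one, otherwise fall
// back to the current working directory, which docker-machine shares
// with the VM when packer runs from under /Users.
func tempDirBase() string {
	if os.Getenv("TMPDIR") != "" {
		return "" // empty string = system default, which respects TMPDIR
	}
	if abs, err := filepath.Abs("."); err == nil {
		return abs
	}
	return ""
}

func main() {
	td, err := ioutil.TempDir(tempDirBase(), "packer-docker")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer os.RemoveAll(td)
	fmt.Println("sharing data via", td)
}
```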
--- builder/docker/step_temp_dir.go | 15 +++++++++++- builder/docker/step_temp_dir_test.go | 36 +++++++++++++++++++++++++++- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/builder/docker/step_temp_dir.go b/builder/docker/step_temp_dir.go index c8b2fa7e6..8d68477eb 100644 --- a/builder/docker/step_temp_dir.go +++ b/builder/docker/step_temp_dir.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/packer/packer" "io/ioutil" "os" + "path/filepath" ) // StepTempDir creates a temporary directory that we use in order to @@ -18,7 +19,19 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating a temporary directory for sharing data...") - td, err := ioutil.TempDir("", "packer-docker") + // Create the docker temp files in the current working directory + // to work around an issue when running with docker-machine + // using vm's needing access to shared folder content. This assumes + // the current working directory is mapped as a share folder. + // Allow TMPDIR to override this location. + path := "" + if tmpdir := os.Getenv("TMPDIR"); tmpdir == "" { + abspath, err := filepath.Abs(".") + if err == nil { + path = abspath + } + } + td, err := ioutil.TempDir(path, "packer-docker") if err != nil { err := fmt.Errorf("Error making temp dir: %s", err) state.Put("error", err) diff --git a/builder/docker/step_temp_dir_test.go b/builder/docker/step_temp_dir_test.go index a7d495f65..38ff7c360 100644 --- a/builder/docker/step_temp_dir_test.go +++ b/builder/docker/step_temp_dir_test.go @@ -3,6 +3,8 @@ package docker import ( "github.com/mitchellh/multistep" "os" + "path/filepath" + "runtime" "testing" ) @@ -10,7 +12,7 @@ func TestStepTempDir_impl(t *testing.T) { var _ multistep.Step = new(StepTempDir) } -func TestStepTempDir(t *testing.T) { +func testStepTempDir_impl(t *testing.T) string { state := testState(t) step := new(StepTempDir) defer step.Cleanup(state) @@ -41,4 +43,36 @@ func TestStepTempDir(t *testing.T) { if _, err := os.Stat(dir); err == nil { t.Fatalf("dir should be gone") } + + return dir +} + +func TestStepTempDir(t *testing.T) { + testStepTempDir_impl(t) +} + +func TestStepTempDir_notmpdir(t *testing.T) { + tempenv := "TMPDIR" + if runtime.GOOS == "windows" { + tempenv = "TMP" + } + // Verify empty TMPDIR maps to current working directory + oldenv := os.Getenv(tempenv) + os.Setenv(tempenv, "") + defer os.Setenv(tempenv, oldenv) + + dir1 := testStepTempDir_impl(t) + + // Now set TMPDIR to current directory + abspath, err := filepath.Abs(".") + if err != nil { + t.Fatalf("could not get current working directory") + } + os.Setenv(tempenv, abspath) + + dir2 := testStepTempDir_impl(t) + + if filepath.Dir(dir1) != filepath.Dir(dir2) { + t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2)) + } } From 52e7f89571c974b295f79039f995beeb8e975cdb Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 7 Oct 2015 15:33:01 -0700 Subject: [PATCH 830/956] Split bin from prerelease guard so we can cross-compile dev builds --- Makefile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f378bcea6..eddf2b0cf 100644 --- a/Makefile +++ b/Makefile @@ -8,9 +8,12 @@ default: test dev ci: deps test -release: updatedeps test bin +release: updatedeps test releasebin bin: deps + @sh -c "$(CURDIR)/scripts/build.sh" + +releasebin: deps @grep 'const VersionPrerelease = ""' version.go > /dev/null ; if [ $$? 
-ne 0 ]; then \ echo "ERROR: You must remove prerelease tags from version.go prior to release."; \ exit 1; \ @@ -77,4 +80,4 @@ updatedeps: fi @echo "INFO: Currently on $(GITBRANCH) ($(GITSHA))" -.PHONY: bin checkversion ci default deps generate test testacc testrace updatedeps +.PHONY: bin checkversion ci default deps generate releasebin test testacc testrace updatedeps From e384df43976fb0b4302cd702cdfa885d02904b49 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 7 Oct 2015 16:51:04 -0700 Subject: [PATCH 831/956] Add warning that dev builds should not be shipped to production --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index eddf2b0cf..5737d0d60 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,7 @@ ci: deps test release: updatedeps test releasebin bin: deps + @echo "WARN: `make bin` is for debug / test builds only. Use `make release` for release builds." @sh -c "$(CURDIR)/scripts/build.sh" releasebin: deps From 1fea962a3aac6f73f5d77a5fcc96082f6d9058f8 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 8 Oct 2015 00:36:31 -0400 Subject: [PATCH 832/956] account_file can be verbatim JSON string --- builder/googlecompute/account.go | 43 +++++++++++++++---- builder/googlecompute/config.go | 5 +-- .../docs/builders/googlecompute.html.markdown | 4 +- 3 files changed, 40 insertions(+), 12 deletions(-) diff --git a/builder/googlecompute/account.go b/builder/googlecompute/account.go index 59baf6044..ea94b11f1 100644 --- a/builder/googlecompute/account.go +++ b/builder/googlecompute/account.go @@ -2,7 +2,10 @@ package googlecompute import ( "encoding/json" + "fmt" + "io/ioutil" "os" + "strings" ) // accountFile represents the structure of the account file JSON file. @@ -13,13 +16,37 @@ type accountFile struct { ClientId string `json:"client_id"` } -func loadJSON(result interface{}, path string) error { - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - dec := json.NewDecoder(f) +func parseJSON(result interface{}, text string) error { + r := strings.NewReader(text) + dec := json.NewDecoder(r) return dec.Decode(result) } + +func processAccountFile(account_file *accountFile, text string) error { + // Assume text is a JSON string + if err := parseJSON(account_file, text); err != nil { + // If text was not JSON, assume it is a file path instead + if _, err := os.Stat(text); os.IsNotExist(err) { + return fmt.Errorf( + "account_file path does not exist: %s", + text) + } + + b, err := ioutil.ReadFile(text) + if err != nil { + return fmt.Errorf( + "Error reading account_file from path '%s': %s", + text, err) + } + + contents := string(b) + + if err := parseJSON(account_file, contents); err != nil { + return fmt.Errorf( + "Error parsing account file '%s': %s", + contents, err) + } + } + + return nil +} diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go index 317d64ace..4ca45c69f 100644 --- a/builder/googlecompute/config.go +++ b/builder/googlecompute/config.go @@ -131,9 +131,8 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { c.stateTimeout = stateTimeout if c.AccountFile != "" { - if err := loadJSON(&c.account, c.AccountFile); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed parsing account file: %s", err)) + if err := processAccountFile(&c.account, c.AccountFile); err != nil { + errs = packer.MultiErrorAppend(errs, err) } } diff --git a/website/source/docs/builders/googlecompute.html.markdown b/website/source/docs/builders/googlecompute.html.markdown 
index 56fdafdcd..c97cd672a 100644 --- a/website/source/docs/builders/googlecompute.html.markdown +++ b/website/source/docs/builders/googlecompute.html.markdown @@ -77,7 +77,9 @@ straightforwarded, it is documented here. Below is a fully functioning example. It doesn't do anything useful, since no provisioners are defined, but it will effectively repackage an existing GCE -image. The account file is obtained in the previous section. +image. The account_file is obtained in the previous section. If it parses as +JSON it is assumed to be the file itself, otherwise it is assumed to be +the path to the file containing the JSON. ``` {.javascript} { From 8f727219d991f230c2832e80d08c2a4d30ab7a32 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 8 Oct 2015 13:50:50 -0400 Subject: [PATCH 833/956] Use a faster deploy script --- scripts/website_push.sh | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/scripts/website_push.sh b/scripts/website_push.sh index fafcbd70c..95168f977 100755 --- a/scripts/website_push.sh +++ b/scripts/website_push.sh @@ -1,12 +1,40 @@ #!/bin/bash +# Set the tmpdir +if [ -z "$TMPDIR" ]; then + TMPDIR="/tmp" +fi + +# Create a temporary build dir and make sure we clean it up. For +# debugging, comment out the trap line. +DEPLOY=`mktemp -d $TMPDIR/packer-www-XXXXXX` +trap "rm -rf $DEPLOY" INT TERM EXIT + # Get the parent directory of where this script is. SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" -# Change into that directory -cd $DIR +# Copy into tmpdir +shopt -s dotglob +cp -r $DIR/website/* $DEPLOY/ -# Push the subtree (force) -git push heroku `git subtree split --prefix website master`:master --force +# Change into that directory +pushd $DEPLOY &>/dev/null + +# Ignore some stuff +touch .gitignore +echo ".sass-cache" >> .gitignore +echo "build" >> .gitignore +echo "vendor" >> .gitignore + +# Add everything +git init -q . +git add . 
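+
+# The repo above was freshly initialized, so each deploy is exactly one
+# commit; the force-push below replaces whatever history Heroku had.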
+git commit -q -m "Deploy by $USER"
+
+git remote add heroku git@heroku.com:packer-www.git
+git push -f heroku master
+
+# Go back to our root
+popd &>/dev/null
From 8ff5c07d0c837d0a87cdf5310711e365dbb684cd Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Thu, 8 Oct 2015 16:37:14 -0700
Subject: [PATCH 834/956] Add a new packer template for testing large file downloads

---
 builder/docker/communicator_test.go | 56 +++++++++++++++++++++++++----
 1 file changed, 50 insertions(+), 6 deletions(-)

diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go
index db0bfcfe8..48bc5035f 100644
--- a/builder/docker/communicator_test.go
+++ b/builder/docker/communicator_test.go
@@ -5,7 +5,6 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
-	"runtime"
 	"strings"
 	"testing"
 
@@ -18,6 +17,7 @@ func TestCommunicator_impl(t *testing.T) {
 	var _ packer.Communicator = new(Communicator)
 }
 
+// TestUploadDownload verifies that basic upload / download functionality works
 func TestUploadDownload(t *testing.T) {
 	ui := packer.TestUi(t)
 	cache := &packer.FileCache{CacheDir: os.TempDir()}
@@ -27,13 +27,9 @@ func TestUploadDownload(t *testing.T) {
 		t.Fatalf("Unable to parse config: %s", err)
 	}
 
-	// Make sure we only run this on linux hosts
 	if os.Getenv("PACKER_ACC") == "" {
 		t.Skip("This test is only run with PACKER_ACC=1")
 	}
-	if runtime.GOOS != "linux" {
-		t.Skip("This test is only supported on linux")
-	}
 	cmd := exec.Command("docker", "-v")
 	cmd.Run()
 	if !cmd.ProcessState.Success() {
 		t.Error("docker command not found; please make sure docker is installed")
 	}
@@ -102,13 +98,26 @@ func TestUploadDownload(t *testing.T) {
 	}
 }
 
+// TestLargeDownload verifies that files are the appropriate size after being
+// downloaded. This is to identify and fix the race condition in #2793. You may
+// need to use github.com/cbednarski/rerun to verify since this problem occurs
+// only intermittently.
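+//
+// A download cut short by the race still leaves a file on disk, just a
+// truncated one, so the test compares exact byte counts instead of only
+// checking that the files exist.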
+func TestLargeDownload(t *testing.T) { + // cupcake is 2097152 bytes + // weddingcake is 104857600 bytes + // we will want to verify the size of the file after we download it + + cupcake + weddingcake +} + const dockerBuilderConfig = ` { "builders": [ { "type": "docker", "image": "alpine", - "export_path": "alpine.tar", + "discard": true, "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"] } ], @@ -127,3 +136,38 @@ const dockerBuilderConfig = ` ] } ` + +const dockerLargeBuilderConfig = ` +{ + "builders": [ + { + "type": "docker", + "image": "alpine", + "discard": true + } + ], + "provisioners": [ + { + "type": "shell", + "inline": [ + "dd if=/dev/urandom of=/tmp/cupcake bs=1M count=2", + "dd if=/dev/urandom of=/tmp/weddingcake bs=1M count=100", + "sync", + "md5sum /tmp/cupcake /tmp/weddingcake" + ] + }, + { + "type": "file", + "source": "/tmp/cupcake", + "destination": "cupcake", + "direction": "download" + }, + { + "type": "file", + "source": "/tmp/weddingcake", + "destination": "weddingcake", + "direction": "download" + } + ] +} +` From cc153aa71f309c6682e38757ac2183480c1ce295 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 8 Oct 2015 17:15:27 -0700 Subject: [PATCH 835/956] Added a test to verify byte size for larger files downloaded from docker --- builder/docker/communicator_test.go | 118 +++++++++++++++++++++++++--- 1 file changed, 107 insertions(+), 11 deletions(-) diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index 48bc5035f..8448a6054 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -10,6 +10,7 @@ import ( "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/provisioner/file" + "github.com/mitchellh/packer/provisioner/shell" "github.com/mitchellh/packer/template" ) @@ -103,12 +104,107 @@ func TestUploadDownload(t *testing.T) { // need to use github.com/cbednarski/rerun to verify since this problem occurs // only intermittently. func TestLargeDownload(t *testing.T) { - // cupcake is 2097152 bytes - // weddingcake is 104857600 bytes - // we will want to verify the size of the file after we download it + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + tpl, err := template.Parse(strings.NewReader(dockerLargeBuilderConfig)) + if err != nil { + t.Fatalf("Unable to parse config: %s", err) + } + + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1") + } + cmd := exec.Command("docker", "-v") + cmd.Run() + if !cmd.ProcessState.Success() { + t.Error("docker command not found; please make sure docker is installed") + } + + // Setup the builder + builder := &Builder{} + warnings, err := builder.Prepare(tpl.Builders["docker"].Config) + if err != nil { + t.Fatalf("Error preparing configuration %s", err) + } + if len(warnings) > 0 { + t.Fatal("Encountered configuration warnings; aborting") + } + + // Setup the provisioners + shell := &shell.Provisioner{} + err = shell.Prepare(tpl.Provisioners[0].Config) + if err != nil { + t.Fatalf("Error preparing shell provisioner: %s", err) + } + downloadCupcake := &file.Provisioner{} + err = downloadCupcake.Prepare(tpl.Provisioners[1].Config) + if err != nil { + t.Fatalf("Error preparing downloadCupcake: %s", err) + } + downloadBigcake := &file.Provisioner{} + err = downloadBigcake.Prepare(tpl.Provisioners[2].Config) + if err != nil { + t.Fatalf("Error preparing downloadBigcake: %s", err) + } + + // Preemptive cleanup. 
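+	// The deferred removes run once the test function returns, including
+	// after a t.Fatalf, so the downloaded files do not linger between runs.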
+ defer os.Remove("cupcake") + defer os.Remove("bigcake") + + // Add hooks so the provisioners run during the build + hooks := map[string][]packer.Hook{} + hooks[packer.HookProvision] = []packer.Hook{ + &packer.ProvisionHook{ + Provisioners: []packer.Provisioner{ + shell, + downloadCupcake, + downloadBigcake, + }, + }, + } + hook := &packer.DispatchHook{Mapping: hooks} + + // Run things + artifact, err := builder.Run(ui, hook, cache) + if err != nil { + t.Fatalf("Error running build %s", err) + } + // Preemptive cleanup + defer artifact.Destroy() + + // Verify that the things we downloaded are the right size. Complain loudly + // if they are not. + // + // cupcake should be 2097152 bytes + // bigcake should be 104857600 bytes + cupcake, err := os.Stat("cupcake") + if err != nil { + t.Fatalf("Unable to stat cupcake file") + } + cupcakeExpected := int64(2097152) + if cupcake.Size() != cupcakeExpected { + t.Errorf("Expected cupcake to be %s bytes; found %s", cupcakeExpected, cupcake.Size()) + } + + bigcake, err := os.Stat("bigcake") + if err != nil { + t.Fatalf("Unable to stat bigcake file") + } + bigcakeExpected := int64(104857600) + if bigcake.Size() != bigcakeExpected { + t.Errorf("Expected bigcake to be %s bytes; found %s", bigcakeExpected, bigcake.Size()) + } + + // TODO if we can, calculate a sha inside the container and compare to the + // one we get after we pull it down. We will probably have to parse the log + // or ui output to do this because we use /dev/urandom to create the file. + + // if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { + // t.Fatalf("Input and output files do not match\n"+ + // "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile) + // } - cupcake - weddingcake } const dockerBuilderConfig = ` @@ -116,7 +212,7 @@ const dockerBuilderConfig = ` "builders": [ { "type": "docker", - "image": "alpine", + "image": "ubuntu", "discard": true, "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"] } @@ -142,7 +238,7 @@ const dockerLargeBuilderConfig = ` "builders": [ { "type": "docker", - "image": "alpine", + "image": "ubuntu", "discard": true } ], @@ -151,9 +247,9 @@ const dockerLargeBuilderConfig = ` "type": "shell", "inline": [ "dd if=/dev/urandom of=/tmp/cupcake bs=1M count=2", - "dd if=/dev/urandom of=/tmp/weddingcake bs=1M count=100", + "dd if=/dev/urandom of=/tmp/bigcake bs=1M count=100", "sync", - "md5sum /tmp/cupcake /tmp/weddingcake" + "md5sum /tmp/cupcake /tmp/bigcake" ] }, { @@ -164,8 +260,8 @@ const dockerLargeBuilderConfig = ` }, { "type": "file", - "source": "/tmp/weddingcake", - "destination": "weddingcake", + "source": "/tmp/bigcake", + "destination": "bigcake", "direction": "download" } ] From 5eddaa77bf253808d52d8cb11fcc130d09c45e6d Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 8 Oct 2015 17:43:54 -0700 Subject: [PATCH 836/956] Corrected sprintf formatting in error messages --- builder/docker/communicator_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/docker/communicator_test.go b/builder/docker/communicator_test.go index 8448a6054..be533b8e7 100644 --- a/builder/docker/communicator_test.go +++ b/builder/docker/communicator_test.go @@ -180,20 +180,20 @@ func TestLargeDownload(t *testing.T) { // bigcake should be 104857600 bytes cupcake, err := os.Stat("cupcake") if err != nil { - t.Fatalf("Unable to stat cupcake file") + t.Fatalf("Unable to stat cupcake file: %s", err) } cupcakeExpected := int64(2097152) if cupcake.Size() != cupcakeExpected { - t.Errorf("Expected cupcake to be %s bytes; 
found %s", cupcakeExpected, cupcake.Size()) + t.Errorf("Expected cupcake to be %d bytes; found %d", cupcakeExpected, cupcake.Size()) } bigcake, err := os.Stat("bigcake") if err != nil { - t.Fatalf("Unable to stat bigcake file") + t.Fatalf("Unable to stat bigcake file: %s", err) } bigcakeExpected := int64(104857600) if bigcake.Size() != bigcakeExpected { - t.Errorf("Expected bigcake to be %s bytes; found %s", bigcakeExpected, bigcake.Size()) + t.Errorf("Expected bigcake to be %d bytes; found %d", bigcakeExpected, bigcake.Size()) } // TODO if we can, calculate a sha inside the container and compare to the From d87b68efe8216faf18fb161f2c74938285a7b52b Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sat, 10 Oct 2015 14:32:39 -0700 Subject: [PATCH 837/956] Syncronize cancellation in windows-restart tests Two windows-restart tests would timeout and fail due to the cancellation thread firing before the cancel object was created. This change syncronizes the start of the threads to prevent this from occurring. --- provisioner/windows-restart/provisioner_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go index 247452c22..1f3f70ba3 100644 --- a/provisioner/windows-restart/provisioner_test.go +++ b/provisioner/windows-restart/provisioner_test.go @@ -268,15 +268,18 @@ func TestProvision_waitForCommunicatorWithCancel(t *testing.T) { // Run 2 goroutines; // 1st to call waitForCommunicator (that will always fail) // 2nd to cancel the operation + waitStart := make(chan bool) waitDone := make(chan bool) go func() { + waitStart <- true err = waitForCommunicator(p) + waitDone <- true }() go func() { time.Sleep(10 * time.Millisecond) + <-waitStart p.Cancel() - waitDone <- true }() <-waitDone @@ -327,13 +330,15 @@ func TestProvision_Cancel(t *testing.T) { comm := new(packer.MockCommunicator) p.Prepare(config) + waitStart := make(chan bool) waitDone := make(chan bool) // Block until cancel comes through waitForCommunicator = func(p *Provisioner) error { + waitStart <- true for { select { - case <-waitDone: + case <-p.cancel: } } } @@ -346,6 +351,7 @@ func TestProvision_Cancel(t *testing.T) { }() go func() { + <-waitStart p.Cancel() }() <-waitDone From 2306f4a4e42f20e0cabc1c384d73dd1932d7ece7 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 11 Oct 2015 11:20:50 -0700 Subject: [PATCH 838/956] Fixes #2699: catch invalid communicator types --- helper/communicator/config.go | 4 ++++ helper/communicator/config_test.go | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 0f19c4e68..53de3f81e 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -65,6 +65,10 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { if es := c.prepareWinRM(ctx); len(es) > 0 { errs = append(errs, es...) 
} + case "none": + break + default: + return []error{fmt.Errorf("Communicator type %s is invalid", c.Type)} } return errs diff --git a/helper/communicator/config_test.go b/helper/communicator/config_test.go index dc1bd965d..cbdaafa98 100644 --- a/helper/communicator/config_test.go +++ b/helper/communicator/config_test.go @@ -30,6 +30,23 @@ func TestConfig_none(t *testing.T) { } } +func TestConfig_badtype(t *testing.T) { + c := &Config{Type: "foo"} + if err := c.Prepare(testContext(t)); len(err) != 1 { + t.Fatalf("bad: %#v", err) + } +} + +func TestConfig_winrm(t *testing.T) { + c := &Config{ + Type: "winrm", + WinRMUser: "admin", + } + if err := c.Prepare(testContext(t)); len(err) > 0 { + t.Fatalf("bad: %#v", err) + } +} + func testContext(t *testing.T) *interpolate.Context { return nil } From 268ce81dd3874822c8afa221e8bd1f8da35fa534 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 11 Oct 2015 11:48:16 -0700 Subject: [PATCH 839/956] helper/communicator: allow docker custom communicator --- helper/communicator/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/communicator/config.go b/helper/communicator/config.go index 53de3f81e..572394fff 100644 --- a/helper/communicator/config.go +++ b/helper/communicator/config.go @@ -65,7 +65,7 @@ func (c *Config) Prepare(ctx *interpolate.Context) []error { if es := c.prepareWinRM(ctx); len(es) > 0 { errs = append(errs, es...) } - case "none": + case "docker", "none": break default: return []error{fmt.Errorf("Communicator type %s is invalid", c.Type)} From 45829c30e58b042e684b620b9f1d9b20f1e9411b Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 11 Oct 2015 12:18:23 -0700 Subject: [PATCH 840/956] builder/amazon/*: fix go vet --- builder/amazon/chroot/step_create_volume.go | 2 +- builder/amazon/common/block_device_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/amazon/chroot/step_create_volume.go b/builder/amazon/chroot/step_create_volume.go index a79e22c47..b190fbed1 100644 --- a/builder/amazon/chroot/step_create_volume.go +++ b/builder/amazon/chroot/step_create_volume.go @@ -56,7 +56,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction { VolumeType: rootDevice.Ebs.VolumeType, Iops: rootDevice.Ebs.Iops, } - log.Printf("Create args: %s", createVolume) + log.Printf("Create args: %+v", createVolume) createVolumeResp, err := ec2conn.CreateVolume(createVolume) if err != nil { diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 99514009b..069a529e1 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -91,12 +91,12 @@ func TestBlockDevice(t *testing.T) { expected := []*ec2.BlockDeviceMapping{tc.Result} got := blockDevices.BuildAMIDevices() if !reflect.DeepEqual(expected, got) { - t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", + t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v", expected, got) } if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) { - t.Fatalf("Bad block device, \nexpected: %s\n\ngot: %s", + t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v", expected, blockDevices.BuildLaunchDevices()) } From e5a713ff0175b7b04bb5e1c7434a9e365c876f4d Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 11 Oct 2015 12:35:13 -0700 Subject: [PATCH 841/956] Alternative fix for #2641: make random script name actually random --- builder/virtualbox/iso/builder.go | 5 ----- builder/virtualbox/ovf/builder.go | 5 ----- 
builder/vmware/iso/builder.go | 4 ---- main.go | 7 +++++++ main_test.go | 7 +++++++ packer/plugin/server.go | 7 +++++++ packer/plugin/server_test.go | 12 ++++++++++++ 7 files changed, 33 insertions(+), 14 deletions(-) create mode 100644 packer/plugin/server_test.go diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index 0758e9bdd..cb7e1077b 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -4,9 +4,7 @@ import ( "errors" "fmt" "log" - "math/rand" "strings" - "time" "github.com/mitchellh/multistep" vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" @@ -211,9 +209,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - // Seed the random number generator - rand.Seed(time.Now().UTC().UnixNano()) - // Create the driver that we'll use to communicate with VirtualBox driver, err := vboxcommon.NewDriver() if err != nil { diff --git a/builder/virtualbox/ovf/builder.go b/builder/virtualbox/ovf/builder.go index 8b9932d54..d7c93c9eb 100644 --- a/builder/virtualbox/ovf/builder.go +++ b/builder/virtualbox/ovf/builder.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "log" - "math/rand" - "time" "github.com/mitchellh/multistep" vboxcommon "github.com/mitchellh/packer/builder/virtualbox/common" @@ -35,9 +33,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Run executes a Packer build and returns a packer.Artifact representing // a VirtualBox appliance. func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - // Seed the random number generator - rand.Seed(time.Now().UTC().UnixNano()) - // Create the driver that we'll use to communicate with VirtualBox driver, err := vboxcommon.NewDriver() if err != nil { diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index f2489c50f..d3148eefe 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -5,7 +5,6 @@ import ( "fmt" "io/ioutil" "log" - "math/rand" "os" "strings" "time" @@ -246,9 +245,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe state.Put("hook", hook) state.Put("ui", ui) - // Seed the random number generator - rand.Seed(time.Now().UTC().UnixNano()) - steps := []multistep.Step{ &vmwcommon.StepPrepareTools{ RemoteType: b.config.RemoteType, diff --git a/main.go b/main.go index 4d23339d1..a0d3190d1 100644 --- a/main.go +++ b/main.go @@ -6,10 +6,12 @@ import ( "io" "io/ioutil" "log" + "math/rand" "os" "path/filepath" "runtime" "sync" + "time" "github.com/mitchellh/cli" "github.com/mitchellh/packer/command" @@ -292,3 +294,8 @@ func copyOutput(r io.Reader, doneCh chan<- struct{}) { wg.Wait() } + +func init() { + // Seed the random number generator + rand.Seed(time.Now().UTC().UnixNano()) +} diff --git a/main_test.go b/main_test.go index 298c69bb1..7a14bed19 100644 --- a/main_test.go +++ b/main_test.go @@ -1,6 +1,7 @@ package main import ( + "math/rand" "reflect" "testing" ) @@ -33,3 +34,9 @@ func TestExtractMachineReadable(t *testing.T) { t.Fatal("should be mr") } } + +func TestRandom(t *testing.T) { + if rand.Intn(9999999) == 8498210 { + t.Fatal("math.rand is not seeded properly") + } +} diff --git a/packer/plugin/server.go b/packer/plugin/server.go index 23f39c028..191b8ba31 100644 --- a/packer/plugin/server.go +++ b/packer/plugin/server.go @@ -13,12 +13,14 @@ import ( packrpc 
"github.com/mitchellh/packer/packer/rpc" "io/ioutil" "log" + "math/rand" "net" "os" "os/signal" "runtime" "strconv" "sync/atomic" + "time" ) // This is a count of the number of interrupts the process has received. @@ -138,3 +140,8 @@ func serverListener_unix() (net.Listener, error) { return net.Listen("unix", path) } + +func init() { + // Seed the random number generator + rand.Seed(time.Now().UTC().UnixNano()) +} diff --git a/packer/plugin/server_test.go b/packer/plugin/server_test.go new file mode 100644 index 000000000..027c8d897 --- /dev/null +++ b/packer/plugin/server_test.go @@ -0,0 +1,12 @@ +package plugin + +import ( + "math/rand" + "testing" +) + +func TestPluginServerRandom(t *testing.T) { + if rand.Intn(9999999) == 8498210 { + t.Fatal("math.rand is not seeded properly") + } +} From 1aad5cf6d5e10254e4659cd7836bc31fbe412b1e Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Mon, 12 Oct 2015 13:20:18 -0700 Subject: [PATCH 842/956] provisioner/windows-restart: remove race in test --- provisioner/windows-restart/provisioner_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/provisioner/windows-restart/provisioner_test.go b/provisioner/windows-restart/provisioner_test.go index 1f3f70ba3..9baf65845 100644 --- a/provisioner/windows-restart/provisioner_test.go +++ b/provisioner/windows-restart/provisioner_test.go @@ -193,12 +193,14 @@ func TestProvision_waitForRestartTimeout(t *testing.T) { p.Prepare(config) waitForCommunicatorOld := waitForCommunicator waitDone := make(chan bool) + waitContinue := make(chan bool) // Block until cancel comes through waitForCommunicator = func(p *Provisioner) error { for { select { case <-waitDone: + waitContinue <- true } } } @@ -207,7 +209,7 @@ func TestProvision_waitForRestartTimeout(t *testing.T) { err = p.Provision(ui, comm) waitDone <- true }() - <-waitDone + <-waitContinue if err == nil { t.Fatal("should not have error") From 74d604e5a889a74e09a4bca2c5e270c4c816f329 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 12 Oct 2015 13:50:24 -0700 Subject: [PATCH 843/956] Switch from hashicorp/go-msgpack to the original upstream ugorgi/go to fix a slew of race conditions --- packer/rpc/client.go | 5 +++-- packer/rpc/server.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packer/rpc/client.go b/packer/rpc/client.go index 2f682f47a..85d1ff550 100644 --- a/packer/rpc/client.go +++ b/packer/rpc/client.go @@ -1,11 +1,12 @@ package rpc import ( - "github.com/hashicorp/go-msgpack/codec" - "github.com/mitchellh/packer/packer" "io" "log" "net/rpc" + + "github.com/mitchellh/packer/packer" + "github.com/ugorji/go/codec" ) // Client is the client end that communicates with a Packer RPC server. 
diff --git a/packer/rpc/server.go b/packer/rpc/server.go index ceb77a8d3..c82772049 100644 --- a/packer/rpc/server.go +++ b/packer/rpc/server.go @@ -6,8 +6,8 @@ import ( "net/rpc" "sync/atomic" - "github.com/hashicorp/go-msgpack/codec" "github.com/mitchellh/packer/packer" + "github.com/ugorji/go/codec" ) var endpointId uint64 From b51cd5406ac101b48f56ac0df4f0fedac4c65950 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 12 Oct 2015 18:12:22 -0700 Subject: [PATCH 844/956] Add explicit wait after Communicator.Download to make sure serveSingleCopy completes --- packer/rpc/communicator.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/packer/rpc/communicator.go b/packer/rpc/communicator.go index abf841af8..525418ee2 100644 --- a/packer/rpc/communicator.go +++ b/packer/rpc/communicator.go @@ -2,11 +2,12 @@ package rpc import ( "encoding/gob" - "github.com/mitchellh/packer/packer" "io" "log" "net/rpc" "os" + + "github.com/mitchellh/packer/packer" ) // An implementation of packer.Communicator where the communicator is actually @@ -137,7 +138,13 @@ func (c *communicator) UploadDir(dst string, src string, exclude []string) error func (c *communicator) Download(path string, w io.Writer) (err error) { // Serve a single connection and a single copy streamId := c.mux.NextId() - go serveSingleCopy("downloadWriter", c.mux, streamId, w, nil) + + waitServer := make(chan bool) + + go func() { + serveSingleCopy("downloadWriter", c.mux, streamId, w, nil) + waitServer <- true + }() args := CommunicatorDownloadArgs{ Path: path, @@ -145,6 +152,9 @@ func (c *communicator) Download(path string, w io.Writer) (err error) { } err = c.client.Call("Communicator.Download", &args, new(interface{})) + + <-waitServer + return } From 48565440272157c20612026d0f6815a5b27ec6fd Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 12 Oct 2015 18:42:17 -0700 Subject: [PATCH 845/956] Add a comment to indicate why we're waiting on the channel --- packer/rpc/communicator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packer/rpc/communicator.go b/packer/rpc/communicator.go index 525418ee2..48c6ea09f 100644 --- a/packer/rpc/communicator.go +++ b/packer/rpc/communicator.go @@ -140,7 +140,6 @@ func (c *communicator) Download(path string, w io.Writer) (err error) { streamId := c.mux.NextId() waitServer := make(chan bool) - go func() { serveSingleCopy("downloadWriter", c.mux, streamId, w, nil) waitServer <- true @@ -151,8 +150,10 @@ func (c *communicator) Download(path string, w io.Writer) (err error) { WriterStreamId: streamId, } + // Start sending data to the RPC server err = c.client.Call("Communicator.Download", &args, new(interface{})) + // Wait for the RPC server to finish receiving the data before we return <-waitServer return From a4eba1102b6842402f0768ddbcb9d1f28eedabe1 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 12 Oct 2015 22:04:13 -0700 Subject: [PATCH 846/956] Switch bool chan to struct chan for less memory usage --- packer/rpc/communicator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packer/rpc/communicator.go b/packer/rpc/communicator.go index 48c6ea09f..34bb86da2 100644 --- a/packer/rpc/communicator.go +++ b/packer/rpc/communicator.go @@ -139,10 +139,10 @@ func (c *communicator) Download(path string, w io.Writer) (err error) { // Serve a single connection and a single copy streamId := c.mux.NextId() - waitServer := make(chan bool) + waitServer := make(chan struct{}) go func() { serveSingleCopy("downloadWriter", 
c.mux, streamId, w, nil) - waitServer <- true + close(waitServer) }() args := CommunicatorDownloadArgs{ From 81f3296dd5f249aef1578f200a30d0c0f231cac9 Mon Sep 17 00:00:00 2001 From: Alvaro Miranda Date: Wed, 14 Oct 2015 17:54:01 +1300 Subject: [PATCH 847/956] remove backquotes from Makefile backquotes get evaluated in WARN message, replacing with simple quotes --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5737d0d60..401efde69 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ ci: deps test release: updatedeps test releasebin bin: deps - @echo "WARN: `make bin` is for debug / test builds only. Use `make release` for release builds." + @echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds." @sh -c "$(CURDIR)/scripts/build.sh" releasebin: deps From af055ad3e02fe3b676d7ee65f76aca966e322b42 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Wed, 14 Oct 2015 08:25:13 -0700 Subject: [PATCH 848/956] Synchronize access to artifact map to remove race --- command/build.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/command/build.go b/command/build.go index 6d2fef67c..43345f6e1 100644 --- a/command/build.go +++ b/command/build.go @@ -128,7 +128,10 @@ func (c BuildCommand) Run(args []string) int { // Run all the builds in parallel and wait for them to complete var interruptWg, wg sync.WaitGroup interrupted := false - artifacts := make(map[string][]packer.Artifact) + var artifacts = struct { + sync.RWMutex + m map[string][]packer.Artifact + }{m: make(map[string][]packer.Artifact)} errors := make(map[string]error) for _, b := range builds { // Increment the waitgroup so we wait for this item to finish properly @@ -163,7 +166,9 @@ func (c BuildCommand) Run(args []string) int { errors[name] = err } else { ui.Say(fmt.Sprintf("Build '%s' finished.", name)) - artifacts[name] = runArtifacts + artifacts.Lock() + artifacts.m[name] = runArtifacts + artifacts.Unlock() } }(b) @@ -213,9 +218,9 @@ func (c BuildCommand) Run(args []string) int { } } - if len(artifacts) > 0 { + if len(artifacts.m) > 0 { c.Ui.Say("\n==> Builds finished. 
The artifacts of successful builds are:") - for name, buildArtifacts := range artifacts { + for name, buildArtifacts := range artifacts.m { // Create a UI for the machine readable stuff to be targetted ui := &packer.TargettedUi{ Target: name, From e27b2bcf23bd6e84ab84a688c08e317ad75052fa Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Wed, 14 Oct 2015 08:25:39 -0700 Subject: [PATCH 849/956] communicator/winrm: fix race in runCommand --- communicator/winrm/communicator.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 59034fcf0..8530ad82b 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -5,6 +5,7 @@ import ( "io" "log" "os" + "sync" "github.com/masterzen/winrm/winrm" "github.com/mitchellh/packer/packer" @@ -84,20 +85,28 @@ func (c *Communicator) Start(rc *packer.RemoteCmd) error { func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { defer shell.Close() + var wg sync.WaitGroup + + copyFunc := func(w io.Writer, r io.Reader) { + wg.Add(1) + defer wg.Done() + io.Copy(w, r) + } if rc.Stdout != nil && cmd.Stdout != nil { - go io.Copy(rc.Stdout, cmd.Stdout) + go copyFunc(rc.Stdout, cmd.Stdout) } else { log.Printf("[WARN] Failed to read stdout for command '%s'", rc.Command) } if rc.Stderr != nil && cmd.Stderr != nil { - go io.Copy(rc.Stderr, cmd.Stderr) + go copyFunc(rc.Stderr, cmd.Stderr) } else { log.Printf("[WARN] Failed to read stderr for command '%s'", rc.Command) } cmd.Wait() + wg.Wait() code := cmd.ExitCode() log.Printf("[INFO] command '%s' exited with code: %d", rc.Command, code) From cad9899cfb7c21bec3460d816c1648d4f2c75cbe Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Wed, 14 Oct 2015 09:08:39 -0700 Subject: [PATCH 850/956] communicator/winrm: call wg.Add() before running goroutine --- communicator/winrm/communicator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 8530ad82b..54dcfb48d 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -88,18 +88,19 @@ func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packer.RemoteCmd) { var wg sync.WaitGroup copyFunc := func(w io.Writer, r io.Reader) { - wg.Add(1) defer wg.Done() io.Copy(w, r) } if rc.Stdout != nil && cmd.Stdout != nil { + wg.Add(1) go copyFunc(rc.Stdout, cmd.Stdout) } else { log.Printf("[WARN] Failed to read stdout for command '%s'", rc.Command) } if rc.Stderr != nil && cmd.Stderr != nil { + wg.Add(1) go copyFunc(rc.Stderr, cmd.Stderr) } else { log.Printf("[WARN] Failed to read stderr for command '%s'", rc.Command) From 31f57218634559eb8127fdc7af623de0ce0655a2 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 14 Oct 2015 16:31:43 -0700 Subject: [PATCH 851/956] Ignore logs from packer tests --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 2965e7967..eef855351 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ test/.env website/.bundle website/vendor + +packer-test*.log \ No newline at end of file From b6bf2f899efd7856cea75d11303399fe1643c803 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 14 Oct 2015 16:32:21 -0700 Subject: [PATCH 852/956] Update makefile to tee test logs to a file so it's easier to review them after the run complete --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile 
index 401efde69..1e615127d 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ generate: deps go generate ./... test: deps - go test $(TEST) $(TESTARGS) -timeout=15s + go test $(TEST) $(TESTARGS) -timeout=15s | tee packer-test.log @go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ go get golang.org/x/tools/cmd/vet; \ fi @@ -49,10 +49,10 @@ test: deps # testacc runs acceptance tests testacc: deps generate @echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel." - PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m + PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m | tee packer-test-acc.log testrace: deps - go test -race $(TEST) $(TESTARGS) -timeout=15s + go test -race $(TEST) $(TESTARGS) -timeout=15s | tee packer-test-race.log # `go get -u` causes git to revert packer to the master branch. This causes all # kinds of headaches. We record the git sha when make starts try to correct it From 141dcd5c03ce8805d94e2e588635880240abd078 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Thu, 15 Oct 2015 09:26:11 +0200 Subject: [PATCH 853/956] Fixes #2828: parallels prlctl_post don't interpolate values. --- builder/parallels/iso/builder.go | 1 + builder/parallels/pvm/config.go | 1 + 2 files changed, 2 insertions(+) diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 2d1d96ba3..43caab4a9 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -65,6 +65,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { Exclude: []string{ "boot_command", "prlctl", + "prlctl_post", "parallels_tools_guest_path", }, }, diff --git a/builder/parallels/pvm/config.go b/builder/parallels/pvm/config.go index 89c3ec1f9..f3f27d314 100644 --- a/builder/parallels/pvm/config.go +++ b/builder/parallels/pvm/config.go @@ -41,6 +41,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { Exclude: []string{ "boot_command", "prlctl", + "prlctl_post", "parallels_tools_guest_path", }, }, From e863dbe10008f08f83b7b46a0eb2b8eb6ab5ba48 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 15 Oct 2015 14:31:13 -0400 Subject: [PATCH 854/956] Revert "Merge pull request #2807 from markpeek/markpeek-docker-tmpdir" This reverts commit 31d367881408587d662527fd72016ce70de35e28, reversing changes made to a3a7c974d0976d5a25d3f8966ddf313e3f7ecfbf. --- builder/docker/step_temp_dir.go | 15 +----------- builder/docker/step_temp_dir_test.go | 36 +--------------------------- 2 files changed, 2 insertions(+), 49 deletions(-) diff --git a/builder/docker/step_temp_dir.go b/builder/docker/step_temp_dir.go index 8d68477eb..c8b2fa7e6 100644 --- a/builder/docker/step_temp_dir.go +++ b/builder/docker/step_temp_dir.go @@ -6,7 +6,6 @@ import ( "github.com/mitchellh/packer/packer" "io/ioutil" "os" - "path/filepath" ) // StepTempDir creates a temporary directory that we use in order to @@ -19,19 +18,7 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating a temporary directory for sharing data...") - // Create the docker temp files in the current working directory - // to work around an issue when running with docker-machine - // using vm's needing access to shared folder content. This assumes - // the current working directory is mapped as a share folder. - // Allow TMPDIR to override this location. 
- path := "" - if tmpdir := os.Getenv("TMPDIR"); tmpdir == "" { - abspath, err := filepath.Abs(".") - if err == nil { - path = abspath - } - } - td, err := ioutil.TempDir(path, "packer-docker") + td, err := ioutil.TempDir("", "packer-docker") if err != nil { err := fmt.Errorf("Error making temp dir: %s", err) state.Put("error", err) diff --git a/builder/docker/step_temp_dir_test.go b/builder/docker/step_temp_dir_test.go index 38ff7c360..a7d495f65 100644 --- a/builder/docker/step_temp_dir_test.go +++ b/builder/docker/step_temp_dir_test.go @@ -3,8 +3,6 @@ package docker import ( "github.com/mitchellh/multistep" "os" - "path/filepath" - "runtime" "testing" ) @@ -12,7 +10,7 @@ func TestStepTempDir_impl(t *testing.T) { var _ multistep.Step = new(StepTempDir) } -func testStepTempDir_impl(t *testing.T) string { +func TestStepTempDir(t *testing.T) { state := testState(t) step := new(StepTempDir) defer step.Cleanup(state) @@ -43,36 +41,4 @@ func testStepTempDir_impl(t *testing.T) string { if _, err := os.Stat(dir); err == nil { t.Fatalf("dir should be gone") } - - return dir -} - -func TestStepTempDir(t *testing.T) { - testStepTempDir_impl(t) -} - -func TestStepTempDir_notmpdir(t *testing.T) { - tempenv := "TMPDIR" - if runtime.GOOS == "windows" { - tempenv = "TMP" - } - // Verify empty TMPDIR maps to current working directory - oldenv := os.Getenv(tempenv) - os.Setenv(tempenv, "") - defer os.Setenv(tempenv, oldenv) - - dir1 := testStepTempDir_impl(t) - - // Now set TMPDIR to current directory - abspath, err := filepath.Abs(".") - if err != nil { - t.Fatalf("could not get current working directory") - } - os.Setenv(tempenv, abspath) - - dir2 := testStepTempDir_impl(t) - - if filepath.Dir(dir1) != filepath.Dir(dir2) { - t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2)) - } } From 4015c1789b21c47aac6af4ee394fbc9cad690f88 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Fri, 16 Oct 2015 19:57:39 +0200 Subject: [PATCH 855/956] Fixes #2836 - Use "/Applications/Parallels Desktop.app" as fallback app path. --- builder/parallels/common/driver_9.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builder/parallels/common/driver_9.go b/builder/parallels/common/driver_9.go index db229597b..aa11d1c0e 100644 --- a/builder/parallels/common/driver_9.go +++ b/builder/parallels/common/driver_9.go @@ -95,6 +95,12 @@ func getAppPath(bundleId string) (string, error) { pathOutput := strings.TrimSpace(stdout.String()) if pathOutput == "" { + if fi, err := os.Stat("/Applications/Parallels Desktop.app"); err == nil { + if fi.IsDir() { + return "/Applications/Parallels Desktop.app", nil + } + } + return "", fmt.Errorf( "Could not detect Parallels Desktop! 
Make sure it is properly installed.") } From 38c81cf3e3ad8830a7588e8318070ebccb2872ae Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 16 Oct 2015 17:32:36 -0700 Subject: [PATCH 856/956] Move ConfigFile() and ConfigDir() from package main to packer --- checkpoint.go | 3 ++- config.go | 15 +-------------- main.go | 2 +- packer/config_file.go | 14 ++++++++++++++ config_unix.go => packer/config_file_unix.go | 2 +- .../config_file_windows.go | 2 +- 6 files changed, 20 insertions(+), 18 deletions(-) create mode 100644 packer/config_file.go rename config_unix.go => packer/config_file_unix.go (98%) rename config_windows.go => packer/config_file_windows.go (98%) diff --git a/checkpoint.go b/checkpoint.go index f8db2be28..87ef594c2 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/go-checkpoint" "github.com/mitchellh/packer/command" + "github.com/mitchellh/packer/packer" ) func init() { @@ -25,7 +26,7 @@ func runCheckpoint(c *config) { return } - configDir, err := ConfigDir() + configDir, err := packer.ConfigDir() if err != nil { log.Printf("[ERR] Checkpoint setup error: %s", err) checkpointResult <- nil diff --git a/config.go b/config.go index efb4e7d31..62b92d9f5 100644 --- a/config.go +++ b/config.go @@ -25,19 +25,6 @@ type config struct { Provisioners map[string]string } -// ConfigFile returns the default path to the configuration file. On -// Unix-like systems this is the ".packerconfig" file in the home directory. -// On Windows, this is the "packer.config" file in the application data -// directory. -func ConfigFile() (string, error) { - return configFile() -} - -// ConfigDir returns the configuration directory for Packer. -func ConfigDir() (string, error) { - return configDir() -} - // Decodes configuration in JSON format from the given io.Reader into // the config object pointed to. func decodeConfig(r io.Reader, c *config) error { @@ -64,7 +51,7 @@ func (c *config) Discover() error { } // Next, look in the plugins directory. - dir, err := ConfigDir() + dir, err := packer.ConfigDir() if err != nil { log.Printf("[ERR] Error loading config directory: %s", err) } else { diff --git a/main.go b/main.go index a0d3190d1..69bec4a8d 100644 --- a/main.go +++ b/main.go @@ -223,7 +223,7 @@ func loadConfig() (*config, error) { configFilePath := os.Getenv("PACKER_CONFIG") if configFilePath == "" { var err error - configFilePath, err = configFile() + configFilePath, err = packer.ConfigFile() if err != nil { log.Printf("Error detecting default config file path: %s", err) diff --git a/packer/config_file.go b/packer/config_file.go new file mode 100644 index 000000000..f5d36e9e4 --- /dev/null +++ b/packer/config_file.go @@ -0,0 +1,14 @@ +package packer + +// ConfigFile returns the default path to the configuration file. On +// Unix-like systems this is the ".packerconfig" file in the home directory. +// On Windows, this is the "packer.config" file in the application data +// directory. +func ConfigFile() (string, error) { + return configFile() +} + +// ConfigDir returns the configuration directory for Packer. 
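+// On Unix-like systems this is typically a ".packer.d" directory in the
+// user's home directory; on Windows it lives under the application data
+// directory.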
+func ConfigDir() (string, error) { + return configDir() +} diff --git a/config_unix.go b/packer/config_file_unix.go similarity index 98% rename from config_unix.go rename to packer/config_file_unix.go index 2c8a7a304..82260c2a2 100644 --- a/config_unix.go +++ b/packer/config_file_unix.go @@ -1,6 +1,6 @@ // +build darwin freebsd linux netbsd openbsd -package main +package packer import ( "bytes" diff --git a/config_windows.go b/packer/config_file_windows.go similarity index 98% rename from config_windows.go rename to packer/config_file_windows.go index fa3ab94b7..d0bcc1c50 100644 --- a/config_windows.go +++ b/packer/config_file_windows.go @@ -1,6 +1,6 @@ // +build windows -package main +package packer import ( "path/filepath" From 914e78f60263936833c069ea0d67ed041ae9bdad Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 18 Oct 2015 11:00:05 -0700 Subject: [PATCH 857/956] builder/amazon/common: go fmt --- builder/amazon/common/block_device.go | 2 +- builder/amazon/common/block_device_test.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 73d17c45f..0a255fe6c 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -30,7 +30,7 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { var blockDevices []*ec2.BlockDeviceMapping for _, blockDevice := range b { - mapping := &ec2.BlockDeviceMapping { + mapping := &ec2.BlockDeviceMapping{ DeviceName: aws.String(blockDevice.DeviceName), } diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index d0a9c0cb5..e9fdb6c0f 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -23,7 +23,7 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/sdb"), + DeviceName: aws.String("/dev/sdb"), Ebs: &ec2.EbsBlockDevice{ SnapshotId: aws.String("snap-1234"), VolumeType: aws.String("standard"), @@ -39,7 +39,7 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/sdb"), + DeviceName: aws.String("/dev/sdb"), Ebs: &ec2.EbsBlockDevice{ VolumeSize: aws.Int64(8), DeleteOnTermination: aws.Bool(false), @@ -56,7 +56,7 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/sdb"), + DeviceName: aws.String("/dev/sdb"), Ebs: &ec2.EbsBlockDevice{ VolumeType: aws.String("io1"), VolumeSize: aws.Int64(8), @@ -75,7 +75,7 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/sdb"), + DeviceName: aws.String("/dev/sdb"), Ebs: &ec2.EbsBlockDevice{ VolumeType: aws.String("gp2"), VolumeSize: aws.Int64(8), @@ -102,8 +102,8 @@ func TestBlockDevice(t *testing.T) { }, Result: &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/sdb"), - NoDevice: aws.String(""), + DeviceName: aws.String("/dev/sdb"), + NoDevice: aws.String(""), }, }, } From f6c7e3740f9229d14b19293293010984c28c7f3e Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 18 Oct 2015 11:05:21 -0700 Subject: [PATCH 858/956] Don't create EBS block device if VirtualName is specified --- builder/amazon/common/block_device.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index 0a255fe6c..985c582ea 100644 --- a/builder/amazon/common/block_device.go +++ 
b/builder/amazon/common/block_device.go @@ -36,8 +36,10 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping { if blockDevice.NoDevice { mapping.NoDevice = aws.String("") - } else if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { - mapping.VirtualName = aws.String(blockDevice.VirtualName) + } else if blockDevice.VirtualName != "" { + if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") { + mapping.VirtualName = aws.String(blockDevice.VirtualName) + } } else { ebsBlockDevice := &ec2.EbsBlockDevice{ DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination), From c48548b3bb4f72867235bcd1742844adac06d613 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sun, 18 Oct 2015 11:13:09 -0700 Subject: [PATCH 859/956] go fmt --- builder/qemu/step_type_boot_command.go | 4 ++-- post-processor/atlas/post-processor.go | 18 +++++++++--------- provisioner/salt-masterless/provisioner.go | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/builder/qemu/step_type_boot_command.go b/builder/qemu/step_type_boot_command.go index 0daa0813b..cd1cd05ec 100644 --- a/builder/qemu/step_type_boot_command.go +++ b/builder/qemu/step_type_boot_command.go @@ -177,9 +177,9 @@ func vncSendString(c *vnc.ClientConn, original string) { } c.KeyEvent(keyCode, true) - time.Sleep(time.Second/10) + time.Sleep(time.Second / 10) c.KeyEvent(keyCode, false) - time.Sleep(time.Second/10) + time.Sleep(time.Second / 10) if keyShift { c.KeyEvent(KeyLeftShift, false) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index 7b8a4d98b..be445b238 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -46,8 +46,8 @@ type Config struct { ctx interpolate.Context user, name string - buildId int - compileId int + buildId int + compileId int } type PostProcessor struct { @@ -159,13 +159,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } opts := &atlas.UploadArtifactOpts{ - User: p.config.user, - Name: p.config.name, - Type: p.config.Type, - ID: artifact.Id(), - Metadata: p.metadata(artifact), - BuildID: p.config.buildId, - CompileID: p.config.compileId, + User: p.config.user, + Name: p.config.name, + Type: p.config.Type, + ID: artifact.Id(), + Metadata: p.metadata(artifact), + BuildID: p.config.buildId, + CompileID: p.config.compileId, } if fs := artifact.Files(); len(fs) > 0 { diff --git a/provisioner/salt-masterless/provisioner.go b/provisioner/salt-masterless/provisioner.go index 1ec740018..80c836241 100644 --- a/provisioner/salt-masterless/provisioner.go +++ b/provisioner/salt-masterless/provisioner.go @@ -194,7 +194,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } ui.Message("Running highstate") - cmd := &packer.RemoteCmd{Command: fmt.Sprintf(p.sudo("salt-call --local state.highstate --file-root=%s --pillar-root=%s -l info --retcode-passthrough"),p.config.RemoteStateTree, p.config.RemotePillarRoots)} + cmd := &packer.RemoteCmd{Command: fmt.Sprintf(p.sudo("salt-call --local state.highstate --file-root=%s --pillar-root=%s -l info --retcode-passthrough"), p.config.RemoteStateTree, p.config.RemotePillarRoots)} if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 { if err == nil { err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) From e6df3e07a017499457e9360ff7185522b9355224 Mon Sep 17 00:00:00 2001 From: Ruslan Salikhov Date: Sun, 18 Oct 2015 23:36:57 +0500 Subject: [PATCH 860/956] Fixing DigitalOcean documentation 
 URLs

Fixed %5C being inserted into links to DigitalOcean resources.
---
 website/source/docs/builders/digitalocean.html.markdown | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/source/docs/builders/digitalocean.html.markdown b/website/source/docs/builders/digitalocean.html.markdown
index b5657ce9d..e9d4456c8 100644
--- a/website/source/docs/builders/digitalocean.html.markdown
+++ b/website/source/docs/builders/digitalocean.html.markdown
@@ -40,17 +40,17 @@ builder.
 
 -   `image` (string) - The name (or slug) of the base image to use. This is the
     image that will be used to launch a new droplet and provision it. See
-    https://developers.digitalocean.com/documentation/v2/\#list-all-images for
+    [https://developers.digitalocean.com/documentation/v2/\#list-all-images](https://developers.digitalocean.com/documentation/v2/#list-all-images) for
     details on how to get a list of the the accepted image names/slugs.
 
 -   `region` (string) - The name (or slug) of the region to launch the droplet
     in. Consequently, this is the region where the snapshot will be available.
     See
-    https://developers.digitalocean.com/documentation/v2/\#list-all-regions for
+    [https://developers.digitalocean.com/documentation/v2/\#list-all-regions](https://developers.digitalocean.com/documentation/v2/#list-all-regions) for
     the accepted region names/slugs.
 
 -   `size` (string) - The name (or slug) of the droplet size to use. See
-    https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for
+    [https://developers.digitalocean.com/documentation/v2/\#list-all-sizes](https://developers.digitalocean.com/documentation/v2/#list-all-sizes) for
     the accepted size names/slugs.
 
 ### Optional:

From fc39f07eccdfc5f144f196b6cf0319201beea77e Mon Sep 17 00:00:00 2001
From: Mark Peek
Date: Sun, 18 Oct 2015 11:37:14 -0700
Subject: [PATCH 861/956] Add EbsOptimized to RequestSpotInstances (#2806)

---
 builder/amazon/common/step_run_source_instance.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go
index a74d7c138..5333ee67a 100644
--- a/builder/amazon/common/step_run_source_instance.go
+++ b/builder/amazon/common/step_run_source_instance.go
@@ -147,7 +147,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		IamInstanceProfile:  &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},
 		BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
 		Placement:           &ec2.Placement{AvailabilityZone: &s.AvailabilityZone},
-		EbsOptimized:        &s.EbsOptimized,
+		EbsOptimized:        &s.EbsOptimized,
 	}
 
 	if s.SubnetId != "" && s.AssociatePublicIpAddress {
@@ -198,6 +198,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 					AvailabilityZone: &availabilityZone,
 				},
 				BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
+				EbsOptimized:        &s.EbsOptimized,
 			},
 		})
 	if err != nil {

From 25e2ff7b85a6d4a7cfc4aa0e24239f2eed703826 Mon Sep 17 00:00:00 2001
From: Denis Bardadym
Date: Mon, 19 Oct 2015 13:41:30 +0300
Subject: [PATCH 862/956] Remove the disallowed comma and add all allowed
 special characters

---
 builder/amazon/common/template_funcs.go      | 4 +++-
 builder/amazon/common/template_funcs_test.go | 4 ++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/builder/amazon/common/template_funcs.go b/builder/amazon/common/template_funcs.go
index 30d49fdb4..7a0998b34 100644
--- a/builder/amazon/common/template_funcs.go
+++ b/builder/amazon/common/template_funcs.go
@@ -19,8 
+19,10 @@ func isalphanumeric(b byte) bool { } // Clean up AMI name by replacing invalid characters with "-" +// For allowed characters see docs for Name parameter +// at http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html func templateCleanAMIName(s string) string { - allowed := []byte{'(', ')', ',', '/', '-', '_', ' '} + allowed := []byte{'(', ')', '[', ']', ' ', '.', '/', '-', '\'', '@', '_'} b := []byte(s) newb := make([]byte, len(b)) for i, c := range b { diff --git a/builder/amazon/common/template_funcs_test.go b/builder/amazon/common/template_funcs_test.go index e4126bf61..11ad70aac 100644 --- a/builder/amazon/common/template_funcs_test.go +++ b/builder/amazon/common/template_funcs_test.go @@ -5,8 +5,8 @@ import ( ) func TestAMITemplatePrepare_clean(t *testing.T) { - origName := "AMZamz09(),/-_:&^ $%" - expected := "AMZamz09(),/-_--- --" + origName := "AMZamz09()./-_:&^ $%[]#'@" + expected := "AMZamz09()./-_--- --[]-'@" name := templateCleanAMIName(origName) From bc0f438db0b77717994420076f8505d780dc3ca0 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 16 Oct 2015 17:36:29 -0700 Subject: [PATCH 863/956] Use alternate temp directories for docker The temporary directories will be created under the packer config directory. Setting PACKER_TMP_DIR will override this path. --- builder/docker/step_temp_dir.go | 11 +++++- builder/docker/step_temp_dir_test.go | 56 +++++++++++++++++++++++++++- packer/config_file.go | 26 +++++++++++++ 3 files changed, 89 insertions(+), 4 deletions(-) diff --git a/builder/docker/step_temp_dir.go b/builder/docker/step_temp_dir.go index c8b2fa7e6..58b264a4d 100644 --- a/builder/docker/step_temp_dir.go +++ b/builder/docker/step_temp_dir.go @@ -18,7 +18,14 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) ui.Say("Creating a temporary directory for sharing data...") - td, err := ioutil.TempDir("", "packer-docker") + + var err error + var tempdir string + + configTmpDir, err := packer.ConfigTmpDir() + if err == nil { + tempdir, err = ioutil.TempDir(configTmpDir, "packer-docker") + } if err != nil { err := fmt.Errorf("Error making temp dir: %s", err) state.Put("error", err) @@ -26,7 +33,7 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - s.tempDir = td + s.tempDir = tempdir state.Put("temp_dir", s.tempDir) return multistep.ActionContinue } diff --git a/builder/docker/step_temp_dir_test.go b/builder/docker/step_temp_dir_test.go index a7d495f65..5cf851f77 100644 --- a/builder/docker/step_temp_dir_test.go +++ b/builder/docker/step_temp_dir_test.go @@ -1,16 +1,19 @@ package docker import ( - "github.com/mitchellh/multistep" "os" + "path/filepath" "testing" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" ) func TestStepTempDir_impl(t *testing.T) { var _ multistep.Step = new(StepTempDir) } -func TestStepTempDir(t *testing.T) { +func testStepTempDir_impl(t *testing.T) string { state := testState(t) step := new(StepTempDir) defer step.Cleanup(state) @@ -41,4 +44,53 @@ func TestStepTempDir(t *testing.T) { if _, err := os.Stat(dir); err == nil { t.Fatalf("dir should be gone") } + + return dir +} + +func TestStepTempDir(t *testing.T) { + testStepTempDir_impl(t) +} + +func TestStepTempDir_notmpdir(t *testing.T) { + tempenv := "PACKER_TMP_DIR" + + oldenv := os.Getenv(tempenv) + defer os.Setenv(tempenv, oldenv) + os.Setenv(tempenv, "") + + dir1 := testStepTempDir_impl(t) + + cd, err := packer.ConfigDir() + if 
err != nil { + t.Fatalf("bad ConfigDir") + } + td := filepath.Join(cd, "tmp") + os.Setenv(tempenv, td) + + dir2 := testStepTempDir_impl(t) + + if filepath.Dir(dir1) != filepath.Dir(dir2) { + t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2)) + } +} + +func TestStepTempDir_packertmpdir(t *testing.T) { + tempenv := "PACKER_TMP_DIR" + + oldenv := os.Getenv(tempenv) + defer os.Setenv(tempenv, oldenv) + os.Setenv(tempenv, ".") + + dir1 := testStepTempDir_impl(t) + + abspath, err := filepath.Abs(".") + if err != nil { + t.Fatalf("bad absolute path") + } + dir2 := filepath.Join(abspath, "tmp") + + if filepath.Dir(dir1) != filepath.Dir(dir2) { + t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2)) + } } diff --git a/packer/config_file.go b/packer/config_file.go index f5d36e9e4..edd10edee 100644 --- a/packer/config_file.go +++ b/packer/config_file.go @@ -1,5 +1,10 @@ package packer +import ( + "os" + "path/filepath" +) + // ConfigFile returns the default path to the configuration file. On // Unix-like systems this is the ".packerconfig" file in the home directory. // On Windows, this is the "packer.config" file in the application data @@ -12,3 +17,24 @@ func ConfigFile() (string, error) { func ConfigDir() (string, error) { return configDir() } + +// ConfigTmpDir returns the configuration tmp directory for Packer +func ConfigTmpDir() (string, error) { + if tmpdir := os.Getenv("PACKER_TMP_DIR"); tmpdir != "" { + return filepath.Abs(tmpdir) + } + configdir, err := configDir() + if err != nil { + return "", err + } + td := filepath.Join(configdir, "tmp") + _, err = os.Stat(td) + if os.IsNotExist(err) { + if err = os.MkdirAll(td, 0755); err != nil { + return "", err + } + } else if err != nil { + return "", err + } + return td, nil +} From a563944b580ed61f61c4fc13d320a7c9457c8848 Mon Sep 17 00:00:00 2001 From: Brian Dwyer Date: Thu, 1 Oct 2015 11:53:27 -0400 Subject: [PATCH 864/956] Fix #2695: Prevent duplicate ISO download for multi-builder builds Add extension to VMware ISO builder to bring in sync with Virtualbox ISO builder --- builder/vmware/iso/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index f1f7830f0..bac3dbdff 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -257,6 +257,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Description: "ISO", ResultKey: "iso_path", Url: b.config.ISOUrls, + Extension: "iso", TargetPath: b.config.TargetPath, }, &vmwcommon.StepOutputDir{ From cdcffecc2d40bd5ce2f2e367017ca74de3a6bfd7 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Tue, 20 Oct 2015 16:27:47 -0700 Subject: [PATCH 865/956] Refactor builder ISO options The ISO builders (parallels, qemu, virtualbox, and vmware) had too much common code which needed to be maintained separately. This change moves that code to a common ISO configuration. 
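
As a sketch of the pattern this refactor produces (not itself part of the
patch), an ISO-based builder now embeds the shared struct and delegates all
iso_* validation to one Prepare call. The package name, Decode options, and
Builder skeleton below are illustrative rather than copied from any single
builder:

package iso

import (
	"github.com/mitchellh/packer/common"
	"github.com/mitchellh/packer/helper/config"
	"github.com/mitchellh/packer/packer"
	"github.com/mitchellh/packer/template/interpolate"
)

type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	common.ISOConfig    `mapstructure:",squash"` // shared iso_url / iso_checksum fields

	ctx interpolate.Context
}

type Builder struct {
	config Config
}

func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
	// Decode the raw template blocks into the config struct.
	if err := config.Decode(&b.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &b.config.ctx,
	}, raws...); err != nil {
		return nil, err
	}

	var errs *packer.MultiError
	warnings := make([]string, 0)

	// One call now covers checksum-type validation, iso_url/iso_urls
	// reconciliation, and the "checksum type of 'none'" warning that each
	// builder previously reimplemented.
	isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx)
	warnings = append(warnings, isoWarnings...)
	errs = packer.MultiErrorAppend(errs, isoErrs...)

	if errs != nil && len(errs.Errors) > 0 {
		return warnings, errs
	}
	return warnings, nil
}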
--- builder/parallels/iso/builder.go | 63 ++-------- builder/parallels/iso/builder_test.go | 157 ----------------------- builder/qemu/builder.go | 67 ++-------- builder/qemu/builder_test.go | 145 +--------------------- builder/virtualbox/iso/builder.go | 63 ++-------- builder/virtualbox/iso/builder_test.go | 157 ----------------------- builder/vmware/iso/builder.go | 63 ++-------- builder/vmware/iso/builder_test.go | 156 ----------------------- common/iso_config.go | 73 +++++++++++ common/iso_config_test.go | 165 +++++++++++++++++++++++++ 10 files changed, 277 insertions(+), 832 deletions(-) create mode 100644 common/iso_config.go create mode 100644 common/iso_config_test.go diff --git a/builder/parallels/iso/builder.go b/builder/parallels/iso/builder.go index 5e8b178fc..4dda28cbe 100644 --- a/builder/parallels/iso/builder.go +++ b/builder/parallels/iso/builder.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "log" - "strings" "github.com/mitchellh/multistep" parallelscommon "github.com/mitchellh/packer/builder/parallels/common" @@ -24,6 +23,7 @@ type Builder struct { type Config struct { common.PackerConfig `mapstructure:",squash"` + common.ISOConfig `mapstructure:",squash"` parallelscommon.FloppyConfig `mapstructure:",squash"` parallelscommon.OutputConfig `mapstructure:",squash"` parallelscommon.PrlctlConfig `mapstructure:",squash"` @@ -42,14 +42,8 @@ type Config struct { HTTPDir string `mapstructure:"http_directory"` HTTPPortMin uint `mapstructure:"http_port_min"` HTTPPortMax uint `mapstructure:"http_port_max"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` SkipCompaction bool `mapstructure:"skip_compaction"` VMName string `mapstructure:"vm_name"` - TargetPath string `mapstructure:"iso_target_path"` - - RawSingleISOUrl string `mapstructure:"iso_url"` // Deprecated parameters GuestOSDistribution string `mapstructure:"guest_os_distribution"` @@ -77,6 +71,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors and warnings var errs *packer.MultiError + warnings := make([]string, 0) + + isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx) + warnings = append(warnings, isoWarnings...) + errs = packer.MultiErrorAppend(errs, isoErrs...) + errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend( errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) @@ -87,7 +87,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(&b.config.ctx)...) 
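 	// Note (illustrative comment, not in the original patch): the warnings
 	// slice must now exist before ISOConfig.Prepare is called so that its
 	// warnings can be appended, which is why the declaration moves to the
 	// top of Prepare and the late declaration below is removed.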
- warnings := make([]string, 0) if b.config.DiskSize == 0 { b.config.DiskSize = 40000 @@ -138,52 +137,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, errors.New("http_port_min must be less than http_port_max")) } - if b.config.ISOChecksumType == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("The iso_checksum_type must be specified.")) - } else { - b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) - if b.config.ISOChecksumType != "none" { - if b.config.ISOChecksum == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("Due to large file sizes, an iso_checksum is required")) - } else { - b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) - } - - if h := common.HashForType(b.config.ISOChecksumType); h == nil { - errs = packer.MultiErrorAppend( - errs, - fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) - } - } - } - - if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("One of iso_url or iso_urls must be specified.")) - } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("Only one of iso_url or iso_urls may be specified.")) - } else if b.config.RawSingleISOUrl != "" { - b.config.ISOUrls = []string{b.config.RawSingleISOUrl} - } - - for i, url := range b.config.ISOUrls { - b.config.ISOUrls[i], err = common.DownloadableURL(url) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) - } - } - // Warnings - if b.config.ISOChecksumType == "none" { - warnings = append(warnings, - "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ - "a checksum is highly recommended.") - } - if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. 
Without a shutdown command, Packer\n"+ @@ -219,9 +173,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Checksum: b.config.ISOChecksum, ChecksumType: b.config.ISOChecksumType, Description: "ISO", + Extension: "iso", ResultKey: "iso_path", - Url: b.config.ISOUrls, TargetPath: b.config.TargetPath, + Url: b.config.ISOUrls, }, ¶llelscommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/builder/parallels/iso/builder_test.go b/builder/parallels/iso/builder_test.go index b7d4cac50..ff0af5582 100644 --- a/builder/parallels/iso/builder_test.go +++ b/builder/parallels/iso/builder_test.go @@ -2,7 +2,6 @@ package iso import ( "github.com/mitchellh/packer/packer" - "reflect" "testing" ) @@ -193,162 +192,6 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) { } } -func TestBuilderPrepare_ISOChecksum(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum"] = "FOo" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksum != "foo" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksum) - } -} - -func TestBuilderPrepare_ISOChecksumType(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum_type"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum_type"] = "mD5" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "md5" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } - - // Test unknown - config["iso_checksum_type"] = "fake" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test none - config["iso_checksum_type"] = "none" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) == 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "none" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } -} - -func TestBuilderPrepare_ISOUrl(t *testing.T) { - var b Builder - config := testConfig() - delete(config, "iso_url") - delete(config, "iso_urls") - - // Test both epty - config["iso_url"] = "" - b = Builder{} - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test iso_url set - config["iso_url"] = "http://www.packer.io" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected := []string{"http://www.packer.io"} - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } - - // Test both set - config["iso_url"] = "http://www.packer.io" - config["iso_urls"] = []string{"http://www.packer.io"} - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - 
t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test just iso_urls set - delete(config, "iso_url") - config["iso_urls"] = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } -} - func TestBuilderPrepare_ParallelsToolsHostPath(t *testing.T) { var b Builder config := testConfig() diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go index 585250534..568e8c80d 100644 --- a/builder/qemu/builder.go +++ b/builder/qemu/builder.go @@ -8,7 +8,6 @@ import ( "os/exec" "path/filepath" "runtime" - "strings" "time" "github.com/mitchellh/multistep" @@ -22,10 +21,10 @@ import ( const BuilderId = "transcend.qemu" var accels = map[string]struct{}{ - "none": struct{}{}, - "kvm": struct{}{}, - "tcg": struct{}{}, - "xen": struct{}{}, + "none": {}, + "kvm": {}, + "tcg": {}, + "xen": {}, } var netDevice = map[string]bool{ @@ -80,6 +79,7 @@ type Builder struct { type Config struct { common.PackerConfig `mapstructure:",squash"` + common.ISOConfig `mapstructure:",squash"` Comm communicator.Config `mapstructure:",squash"` Accelerator string `mapstructure:"accelerator"` @@ -95,9 +95,6 @@ type Config struct { HTTPDir string `mapstructure:"http_directory"` HTTPPortMin uint `mapstructure:"http_port_min"` HTTPPortMax uint `mapstructure:"http_port_max"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` MachineType string `mapstructure:"machine_type"` NetDevice string `mapstructure:"net_device"` OutputDir string `mapstructure:"output_directory"` @@ -106,7 +103,6 @@ type Config struct { ShutdownCommand string `mapstructure:"shutdown_command"` SSHHostPortMin uint `mapstructure:"ssh_host_port_min"` SSHHostPortMax uint `mapstructure:"ssh_host_port_max"` - TargetPath string `mapstructure:"iso_target_path"` VNCPortMin uint `mapstructure:"vnc_port_min"` VNCPortMax uint `mapstructure:"vnc_port_max"` VMName string `mapstructure:"vm_name"` @@ -120,7 +116,6 @@ type Config struct { RunOnce bool `mapstructure:"run_once"` RawBootWait string `mapstructure:"boot_wait"` - RawSingleISOUrl string `mapstructure:"iso_url"` RawShutdownTimeout string `mapstructure:"shutdown_timeout"` bootWait time.Duration `` @@ -234,6 +229,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { var errs *packer.MultiError warnings := make([]string, 0) + isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx) + warnings = append(warnings, isoWarnings...) + errs = packer.MultiErrorAppend(errs, isoErrs...) + if es := b.config.Comm.Prepare(&b.config.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) 
} @@ -273,45 +272,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, errors.New("http_port_min must be less than http_port_max")) } - if b.config.ISOChecksumType == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("The iso_checksum_type must be specified.")) - } else { - b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) - if b.config.ISOChecksumType != "none" { - if b.config.ISOChecksum == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("Due to large file sizes, an iso_checksum is required")) - } else { - b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) - } - - if h := common.HashForType(b.config.ISOChecksumType); h == nil { - errs = packer.MultiErrorAppend( - errs, - fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) - } - } - } - - if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("One of iso_url or iso_urls must be specified.")) - } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("Only one of iso_url or iso_urls may be specified.")) - } else if b.config.RawSingleISOUrl != "" { - b.config.ISOUrls = []string{b.config.RawSingleISOUrl} - } - - for i, url := range b.config.ISOUrls { - b.config.ISOUrls[i], err = common.DownloadableURL(url) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) - } - } - if !b.config.PackerForce { if _, err := os.Stat(b.config.OutputDir); err == nil { errs = packer.MultiErrorAppend( @@ -350,12 +310,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.QemuArgs = make([][]string, 0) } - if b.config.ISOChecksumType == "none" { - warnings = append(warnings, - "A checksum type of 'none' was specified. 
Since ISO files are so big,\n"+ - "a checksum is highly recommended.") - } - if errs != nil && len(errs.Errors) > 0 { return warnings, errs } @@ -384,9 +338,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Checksum: b.config.ISOChecksum, ChecksumType: b.config.ISOChecksumType, Description: "ISO", + Extension: "iso", ResultKey: "iso_path", - Url: b.config.ISOUrls, TargetPath: b.config.TargetPath, + Url: b.config.ISOUrls, }, new(stepPrepareOutputDir), &common.StepCreateFloppy{ diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go index 84d1d40c3..69442c01f 100644 --- a/builder/qemu/builder_test.go +++ b/builder/qemu/builder_test.go @@ -255,147 +255,6 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) { } } -func TestBuilderPrepare_ISOChecksum(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum"] = "FOo" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksum != "foo" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksum) - } -} - -func TestBuilderPrepare_ISOChecksumType(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum_type"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum_type"] = "mD5" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "md5" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } - - // Test unknown - config["iso_checksum_type"] = "fake" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } -} - -func TestBuilderPrepare_ISOUrl(t *testing.T) { - var b Builder - config := testConfig() - delete(config, "iso_url") - delete(config, "iso_urls") - - // Test both epty - config["iso_url"] = "" - b = Builder{} - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test iso_url set - config["iso_url"] = "http://www.packer.io" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected := []string{"http://www.packer.io"} - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } - - // Test both set - config["iso_url"] = "http://www.packer.io" - config["iso_urls"] = []string{"http://www.packer.io"} - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test just iso_urls set - delete(config, "iso_url") - config["iso_urls"] = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should 
not have error: %s", err) - } - - expected = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } -} - func TestBuilderPrepare_OutputDir(t *testing.T) { var b Builder config := testConfig() @@ -638,7 +497,7 @@ func TestBuilderPrepare_QemuArgs(t *testing.T) { // Test with a good one config["qemuargs"] = [][]interface{}{ - []interface{}{"foo", "bar", "baz"}, + {"foo", "bar", "baz"}, } b = Builder{} @@ -651,7 +510,7 @@ func TestBuilderPrepare_QemuArgs(t *testing.T) { } expected := [][]string{ - []string{"foo", "bar", "baz"}, + {"foo", "bar", "baz"}, } if !reflect.DeepEqual(b.config.QemuArgs, expected) { diff --git a/builder/virtualbox/iso/builder.go b/builder/virtualbox/iso/builder.go index d70ee2a73..587d2f28e 100644 --- a/builder/virtualbox/iso/builder.go +++ b/builder/virtualbox/iso/builder.go @@ -24,6 +24,7 @@ type Builder struct { type Config struct { common.PackerConfig `mapstructure:",squash"` + common.ISOConfig `mapstructure:",squash"` vboxcommon.ExportConfig `mapstructure:",squash"` vboxcommon.ExportOpts `mapstructure:",squash"` vboxcommon.FloppyConfig `mapstructure:",squash"` @@ -43,15 +44,9 @@ type Config struct { GuestAdditionsSHA256 string `mapstructure:"guest_additions_sha256"` GuestOSType string `mapstructure:"guest_os_type"` HardDriveInterface string `mapstructure:"hard_drive_interface"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` ISOInterface string `mapstructure:"iso_interface"` - ISOUrls []string `mapstructure:"iso_urls"` - TargetPath string `mapstructure:"iso_target_path"` VMName string `mapstructure:"vm_name"` - RawSingleISOUrl string `mapstructure:"iso_url"` - ctx interpolate.Context } @@ -75,6 +70,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors and warnings var errs *packer.MultiError + warnings := make([]string, 0) + + isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx) + warnings = append(warnings, isoWarnings...) + errs = packer.MultiErrorAppend(errs, isoErrs...) + errs = packer.MultiErrorAppend(errs, b.config.ExportConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ExportOpts.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(&b.config.ctx)...) @@ -86,7 +87,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs = packer.MultiErrorAppend(errs, b.config.VBoxManageConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.VBoxManagePostConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.VBoxVersionConfig.Prepare(&b.config.ctx)...) 
- warnings := make([]string, 0) if b.config.DiskSize == 0 { b.config.DiskSize = 40000 @@ -122,50 +122,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) } - if b.config.ISOChecksumType == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("The iso_checksum_type must be specified.")) - } else { - b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) - if b.config.ISOChecksumType != "none" { - if b.config.ISOChecksum == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("Due to large file sizes, an iso_checksum is required")) - } else { - b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) - } - - if h := common.HashForType(b.config.ISOChecksumType); h == nil { - errs = packer.MultiErrorAppend( - errs, - fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) - } - } - } - if b.config.ISOInterface != "ide" && b.config.ISOInterface != "sata" { errs = packer.MultiErrorAppend( errs, errors.New("iso_interface can only be ide or sata")) } - if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("One of iso_url or iso_urls must be specified.")) - } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("Only one of iso_url or iso_urls may be specified.")) - } else if b.config.RawSingleISOUrl != "" { - b.config.ISOUrls = []string{b.config.RawSingleISOUrl} - } - - for i, url := range b.config.ISOUrls { - b.config.ISOUrls[i], err = common.DownloadableURL(url) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) - } - } - validMode := false validModes := []string{ vboxcommon.GuestAdditionsModeDisable, @@ -190,12 +151,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } // Warnings - if b.config.ISOChecksumType == "none" { - warnings = append(warnings, - "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ - "a checksum is highly recommended.") - } - if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. 
Without a shutdown command, Packer\n"+ @@ -227,10 +182,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Checksum: b.config.ISOChecksum, ChecksumType: b.config.ISOChecksumType, Description: "ISO", - ResultKey: "iso_path", - Url: b.config.ISOUrls, Extension: "iso", + ResultKey: "iso_path", TargetPath: b.config.TargetPath, + Url: b.config.ISOUrls, }, &vboxcommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/builder/virtualbox/iso/builder_test.go b/builder/virtualbox/iso/builder_test.go index 714587f24..eb2d2fb75 100644 --- a/builder/virtualbox/iso/builder_test.go +++ b/builder/virtualbox/iso/builder_test.go @@ -3,7 +3,6 @@ package iso import ( "github.com/mitchellh/packer/builder/virtualbox/common" "github.com/mitchellh/packer/packer" - "reflect" "testing" ) @@ -315,92 +314,6 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) { } } -func TestBuilderPrepare_ISOChecksum(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum"] = "FOo" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksum != "foo" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksum) - } -} - -func TestBuilderPrepare_ISOChecksumType(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum_type"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum_type"] = "mD5" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "md5" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } - - // Test unknown - config["iso_checksum_type"] = "fake" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test none - config["iso_checksum_type"] = "none" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) == 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "none" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } -} - func TestBuilderPrepare_ISOInterface(t *testing.T) { var b Builder config := testConfig() @@ -441,73 +354,3 @@ func TestBuilderPrepare_ISOInterface(t *testing.T) { t.Fatalf("should not have error: %s", err) } } - -func TestBuilderPrepare_ISOUrl(t *testing.T) { - var b Builder - config := testConfig() - delete(config, "iso_url") - delete(config, "iso_urls") - - // Test both epty - config["iso_url"] = "" - b = Builder{} - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test iso_url set - config["iso_url"] = "http://www.packer.io" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected := []string{"http://www.packer.io"} 
- if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } - - // Test both set - config["iso_url"] = "http://www.packer.io" - config["iso_urls"] = []string{"http://www.packer.io"} - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test just iso_urls set - delete(config, "iso_url") - config["iso_urls"] = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } -} diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index bac3dbdff..c2c8bc470 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "log" "os" - "strings" "time" "github.com/mitchellh/multistep" @@ -27,6 +26,7 @@ type Builder struct { type Config struct { common.PackerConfig `mapstructure:",squash"` + common.ISOConfig `mapstructure:",squash"` vmwcommon.DriverConfig `mapstructure:",squash"` vmwcommon.OutputConfig `mapstructure:",squash"` vmwcommon.RunConfig `mapstructure:",squash"` @@ -41,14 +41,10 @@ type Config struct { DiskTypeId string `mapstructure:"disk_type_id"` FloppyFiles []string `mapstructure:"floppy_files"` GuestOSType string `mapstructure:"guest_os_type"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` Version string `mapstructure:"version"` VMName string `mapstructure:"vm_name"` BootCommand []string `mapstructure:"boot_command"` SkipCompaction bool `mapstructure:"skip_compaction"` - TargetPath string `mapstructure:"iso_target_path"` VMXTemplatePath string `mapstructure:"vmx_template_path"` VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` @@ -61,8 +57,6 @@ type Config struct { RemoteUser string `mapstructure:"remote_username"` RemotePassword string `mapstructure:"remote_password"` - RawSingleISOUrl string `mapstructure:"iso_url"` - ctx interpolate.Context } @@ -83,6 +77,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { // Accumulate any errors and warnings var errs *packer.MultiError + warnings := make([]string, 0) + + isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx) + warnings = append(warnings, isoWarnings...) + errs = packer.MultiErrorAppend(errs, isoErrs...) errs = packer.MultiErrorAppend(errs, b.config.DriverConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...) @@ -91,7 +90,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.VMXConfig.Prepare(&b.config.ctx)...) 
- warnings := make([]string, 0) if b.config.DiskName == "" { b.config.DiskName = "disk" @@ -146,45 +144,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.RemotePort = 22 } - if b.config.ISOChecksumType == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("The iso_checksum_type must be specified.")) - } else { - b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) - if b.config.ISOChecksumType != "none" { - if b.config.ISOChecksum == "" { - errs = packer.MultiErrorAppend( - errs, errors.New("Due to large file sizes, an iso_checksum is required")) - } else { - b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) - } - - if h := common.HashForType(b.config.ISOChecksumType); h == nil { - errs = packer.MultiErrorAppend( - errs, - fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) - } - } - } - - if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("One of iso_url or iso_urls must be specified.")) - } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { - errs = packer.MultiErrorAppend( - errs, errors.New("Only one of iso_url or iso_urls may be specified.")) - } else if b.config.RawSingleISOUrl != "" { - b.config.ISOUrls = []string{b.config.RawSingleISOUrl} - } - - for i, url := range b.config.ISOUrls { - b.config.ISOUrls[i], err = common.DownloadableURL(url) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) - } - } - if b.config.VMXTemplatePath != "" { if err := b.validateVMXTemplatePath(); err != nil { errs = packer.MultiErrorAppend( @@ -202,12 +161,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } // Warnings - if b.config.ISOChecksumType == "none" { - warnings = append(warnings, - "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ - "a checksum is highly recommended.") - } - if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. 
Without a shutdown command, Packer\n"+ @@ -255,10 +208,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe Checksum: b.config.ISOChecksum, ChecksumType: b.config.ISOChecksumType, Description: "ISO", - ResultKey: "iso_path", - Url: b.config.ISOUrls, Extension: "iso", + ResultKey: "iso_path", TargetPath: b.config.TargetPath, + Url: b.config.ISOUrls, }, &vmwcommon.StepOutputDir{ Force: b.config.PackerForce, diff --git a/builder/vmware/iso/builder_test.go b/builder/vmware/iso/builder_test.go index 13a9622f7..2be304a54 100644 --- a/builder/vmware/iso/builder_test.go +++ b/builder/vmware/iso/builder_test.go @@ -29,92 +29,6 @@ func TestBuilder_ImplementsBuilder(t *testing.T) { } } -func TestBuilderPrepare_ISOChecksum(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum"] = "FOo" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksum != "foo" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksum) - } -} - -func TestBuilderPrepare_ISOChecksumType(t *testing.T) { - var b Builder - config := testConfig() - - // Test bad - config["iso_checksum_type"] = "" - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test good - config["iso_checksum_type"] = "mD5" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "md5" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } - - // Test unknown - config["iso_checksum_type"] = "fake" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test none - config["iso_checksum_type"] = "none" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) == 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Fatalf("should not have error: %s", err) - } - - if b.config.ISOChecksumType != "none" { - t.Fatalf("should've lowercased: %s", b.config.ISOChecksumType) - } -} - func TestBuilderPrepare_Defaults(t *testing.T) { var b Builder config := testConfig() @@ -262,76 +176,6 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) { } } -func TestBuilderPrepare_ISOUrl(t *testing.T) { - var b Builder - config := testConfig() - delete(config, "iso_url") - delete(config, "iso_urls") - - // Test both epty - config["iso_url"] = "" - b = Builder{} - warns, err := b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test iso_url set - config["iso_url"] = "http://www.packer.io" - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected := []string{"http://www.packer.io"} - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } - - // Test both set - config["iso_url"] = "http://www.packer.io" - config["iso_urls"] = []string{"http://www.packer.io"} - 
b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err == nil { - t.Fatal("should have error") - } - - // Test just iso_urls set - delete(config, "iso_url") - config["iso_urls"] = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - - b = Builder{} - warns, err = b.Prepare(config) - if len(warns) > 0 { - t.Fatalf("bad: %#v", warns) - } - if err != nil { - t.Errorf("should not have error: %s", err) - } - - expected = []string{ - "http://www.packer.io", - "http://www.hashicorp.com", - } - if !reflect.DeepEqual(b.config.ISOUrls, expected) { - t.Fatalf("bad: %#v", b.config.ISOUrls) - } -} - func TestBuilderPrepare_OutputDir(t *testing.T) { var b Builder config := testConfig() diff --git a/common/iso_config.go b/common/iso_config.go new file mode 100644 index 000000000..6e3cd1f61 --- /dev/null +++ b/common/iso_config.go @@ -0,0 +1,73 @@ +package common + +import ( + "errors" + "fmt" + "strings" + + "github.com/mitchellh/packer/template/interpolate" +) + +// ISOConfig contains configuration for downloading ISO images. +type ISOConfig struct { + ISOChecksum string `mapstructure:"iso_checksum"` + ISOChecksumType string `mapstructure:"iso_checksum_type"` + ISOUrls []string `mapstructure:"iso_urls"` + TargetPath string `mapstructure:"iso_target_path"` + RawSingleISOUrl string `mapstructure:"iso_url"` +} + +func (c *ISOConfig) Prepare(ctx *interpolate.Context) ([]string, []error) { + // Validation + var errs []error + var err error + var warnings []string + + if c.ISOChecksumType == "" { + errs = append( + errs, errors.New("The iso_checksum_type must be specified.")) + } else { + c.ISOChecksumType = strings.ToLower(c.ISOChecksumType) + if c.ISOChecksumType != "none" { + if c.ISOChecksum == "" { + errs = append( + errs, errors.New("Due to large file sizes, an iso_checksum is required")) + } else { + c.ISOChecksum = strings.ToLower(c.ISOChecksum) + } + + if h := HashForType(c.ISOChecksumType); h == nil { + errs = append( + errs, + fmt.Errorf("Unsupported checksum type: %s", c.ISOChecksumType)) + } + } + } + + if c.RawSingleISOUrl == "" && len(c.ISOUrls) == 0 { + errs = append( + errs, errors.New("One of iso_url or iso_urls must be specified.")) + } else if c.RawSingleISOUrl != "" && len(c.ISOUrls) > 0 { + errs = append( + errs, errors.New("Only one of iso_url or iso_urls may be specified.")) + } else if c.RawSingleISOUrl != "" { + c.ISOUrls = []string{c.RawSingleISOUrl} + } + + for i, url := range c.ISOUrls { + c.ISOUrls[i], err = DownloadableURL(url) + if err != nil { + errs = append( + errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) + } + } + + // Warnings + if c.ISOChecksumType == "none" { + warnings = append(warnings, + "A checksum type of 'none' was specified. 
Since ISO files are so big,\n"+ + "a checksum is highly recommended.") + } + + return warnings, errs +} diff --git a/common/iso_config_test.go b/common/iso_config_test.go new file mode 100644 index 000000000..01e801ff4 --- /dev/null +++ b/common/iso_config_test.go @@ -0,0 +1,165 @@ +package common + +import ( + "reflect" + "testing" +) + +func testISOConfig() ISOConfig { + return ISOConfig{ + ISOChecksum: "foo", + ISOChecksumType: "md5", + RawSingleISOUrl: "http://www.packer.io", + } +} + +func TestISOConfigPrepare_ISOChecksum(t *testing.T) { + i := testISOConfig() + + // Test bad + i.ISOChecksum = "" + warns, err := i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test good + i = testISOConfig() + i.ISOChecksum = "FOo" + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if i.ISOChecksum != "foo" { + t.Fatalf("should've lowercased: %s", i.ISOChecksum) + } +} + +func TestISOConfigPrepare_ISOChecksumType(t *testing.T) { + i := testISOConfig() + + // Test bad + i.ISOChecksumType = "" + warns, err := i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test good + i = testISOConfig() + i.ISOChecksumType = "mD5" + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if i.ISOChecksumType != "md5" { + t.Fatalf("should've lowercased: %s", i.ISOChecksumType) + } + + // Test unknown + i = testISOConfig() + i.ISOChecksumType = "fake" + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test none + i = testISOConfig() + i.ISOChecksumType = "none" + warns, err = i.Prepare(nil) + if len(warns) == 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + + if i.ISOChecksumType != "none" { + t.Fatalf("should've lowercased: %s", i.ISOChecksumType) + } +} + +func TestISOConfigPrepare_ISOUrl(t *testing.T) { + i := testISOConfig() + + // Test both empty + i.RawSingleISOUrl = "" + i.ISOUrls = []string{} + warns, err := i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test iso_url set + i = testISOConfig() + i.RawSingleISOUrl = "http://www.packer.io" + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Errorf("should not have error: %s", err) + } + + expected := []string{"http://www.packer.io"} + if !reflect.DeepEqual(i.ISOUrls, expected) { + t.Fatalf("bad: %#v", i.ISOUrls) + } + + // Test both set + i = testISOConfig() + i.RawSingleISOUrl = "http://www.packer.io" + i.ISOUrls = []string{"http://www.packer.io"} + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + // Test just iso_urls set + i = testISOConfig() + i.RawSingleISOUrl = "" + i.ISOUrls = []string{ + "http://www.packer.io", + "http://www.hashicorp.com", + } + + warns, err = i.Prepare(nil) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Errorf("should not have error: %s", err) + } + + expected = []string{ + "http://www.packer.io", + "http://www.hashicorp.com", + } + if !reflect.DeepEqual(i.ISOUrls, 
expected) { + t.Fatalf("bad: %#v", i.ISOUrls) + } +} From 5ee224fe6ae086f30301244cd74d68387f1c5704 Mon Sep 17 00:00:00 2001 From: Jearvon Dharrie Date: Wed, 21 Oct 2015 13:41:49 -0400 Subject: [PATCH 866/956] Remove the word obviously --- website/source/docs/command-line/introduction.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/command-line/introduction.html.markdown b/website/source/docs/command-line/introduction.html.markdown index ea9834397..981c1b2f6 100644 --- a/website/source/docs/command-line/introduction.html.markdown +++ b/website/source/docs/command-line/introduction.html.markdown @@ -4,7 +4,7 @@ description: | is done via the `packer` tool. Like many other command-line tools, the `packer` tool takes a subcommand to execute, and that subcommand may have additional options as well. Subcommands are executed with `packer SUBCOMMAND`, where - "SUBCOMMAND" is obviously the actual command you wish to execute. + "SUBCOMMAND" is the actual command you wish to execute. layout: docs page_title: 'Packer Command-Line' ... @@ -15,7 +15,7 @@ Packer is controlled using a command-line interface. All interaction with Packer is done via the `packer` tool. Like many other command-line tools, the `packer` tool takes a subcommand to execute, and that subcommand may have additional options as well. Subcommands are executed with `packer SUBCOMMAND`, where -"SUBCOMMAND" is obviously the actual command you wish to execute. +"SUBCOMMAND" is the actual command you wish to execute. If you run `packer` by itself, help will be displayed showing all available subcommands and a brief synopsis of what they do. In addition to this, you can From d09a9ab0c7d94f4f938195367fe3dc59165707de Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 17 Aug 2015 01:26:03 -0700 Subject: [PATCH 867/956] Implemented internal plugins - Internal plugins are compiled into the same packer binary and invoked through the plugin command - Search paths allow disk-based plugins to override and should function as normal - This should allow for a 94% space savings vs statically compiling all the plugins as separate binaries.. 
approximately 24mb vs 431mb --- command/plugin.go | 147 ++++++++++++++++++++++++++++++++++++++++++++++ commands.go | 6 ++ config.go | 55 ++++++++++++++++- 3 files changed, 206 insertions(+), 2 deletions(-) create mode 100644 command/plugin.go diff --git a/command/plugin.go b/command/plugin.go new file mode 100644 index 000000000..76e97484e --- /dev/null +++ b/command/plugin.go @@ -0,0 +1,147 @@ +package command + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/mitchellh/packer/builder/amazon/chroot" + "github.com/mitchellh/packer/builder/amazon/ebs" + "github.com/mitchellh/packer/builder/amazon/instance" + "github.com/mitchellh/packer/builder/digitalocean" + "github.com/mitchellh/packer/builder/docker" + filebuilder "github.com/mitchellh/packer/builder/file" + "github.com/mitchellh/packer/builder/googlecompute" + "github.com/mitchellh/packer/builder/null" + "github.com/mitchellh/packer/builder/openstack" + parallelsiso "github.com/mitchellh/packer/builder/parallels/iso" + parallelspvm "github.com/mitchellh/packer/builder/parallels/pvm" + "github.com/mitchellh/packer/builder/qemu" + virtualboxiso "github.com/mitchellh/packer/builder/virtualbox/iso" + virtualboxovf "github.com/mitchellh/packer/builder/virtualbox/ovf" + vmwareiso "github.com/mitchellh/packer/builder/vmware/iso" + vmwarevmx "github.com/mitchellh/packer/builder/vmware/vmx" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/provisioner/ansible-local" + "github.com/mitchellh/packer/provisioner/chef-client" + "github.com/mitchellh/packer/provisioner/chef-solo" + fileprovisioner "github.com/mitchellh/packer/provisioner/file" + "github.com/mitchellh/packer/provisioner/powershell" + "github.com/mitchellh/packer/provisioner/puppet-masterless" + "github.com/mitchellh/packer/provisioner/puppet-server" + "github.com/mitchellh/packer/provisioner/salt-masterless" + "github.com/mitchellh/packer/provisioner/shell" + shelllocal "github.com/mitchellh/packer/provisioner/shell-local" + "github.com/mitchellh/packer/provisioner/windows-restart" + windowsshell "github.com/mitchellh/packer/provisioner/windows-shell" +) + +type PluginCommand struct { + Meta +} + +var Builders = map[string]packer.Builder{ + "amazon-chroot": new(chroot.Builder), + "amazon-ebs": new(ebs.Builder), + "amazon-instance": new(instance.Builder), + "digitalocean": new(digitalocean.Builder), + "docker": new(docker.Builder), + "file": new(filebuilder.Builder), + "googlecompute": new(googlecompute.Builder), + "null": new(null.Builder), + "openstack": new(openstack.Builder), + "parallels-iso": new(parallelsiso.Builder), + "parallels-pvm": new(parallelspvm.Builder), + "qemu": new(qemu.Builder), + "virtualbox-iso": new(virtualboxiso.Builder), + "virtualbox-ovf": new(virtualboxovf.Builder), + "vmware-iso": new(vmwareiso.Builder), + "vmware-vmx": new(vmwarevmx.Builder), +} + +var Provisioners = map[string]packer.Provisioner{ + "ansible-local": new(ansiblelocal.Provisioner), + "chef-client": new(chefclient.Provisioner), + "chef-solo": new(chefsolo.Provisioner), + "file": new(fileprovisioner.Provisioner), + "powershell": new(powershell.Provisioner), + "puppet-masterless": new(puppetmasterless.Provisioner), + "puppet-server": new(puppetserver.Provisioner), + "salt-masterless": new(saltmasterless.Provisioner), + "shell": new(shell.Provisioner), + "shell-local": new(shelllocal.Provisioner), + "windows-restart": new(restart.Provisioner), + "windows-shell": new(windowsshell.Provisioner), +} + +var PostProcessors = 
map[string]packer.PostProcessor{}
+
+func (c *PluginCommand) Run(args []string) int {
+	// This is an internal call so we're not going to do much error checking.
+	// If there's a problem we'll usually just crash.
+	log.Printf("args: %#v", args)
+	if len(args) != 1 {
+		c.Ui.Error("Wrong number of args")
+		os.Exit(1)
+	}
+
+	// Plugin should be called like "packer-builder-amazon-ebs" so we'll take it
+	// apart.
+	parts := strings.Split(args[0], "-")
+	pluginType := parts[1]
+	pluginName := ""
+	// Post-processor is split so we'll do some magic here. We could use a
+	// regexp but this is simpler.
+	if pluginType == "post" {
+		pluginType = strings.Join(parts[1:3], "-")
+		pluginName = strings.Join(parts[3:], "-")
+	} else {
+		pluginName = strings.Join(parts[2:], "-")
+	}
+
+	server, err := plugin.Server()
+	if err != nil {
+		panic(err)
+	}
+
+	if pluginType == "builder" {
+		builder, found := Builders[pluginName]
+		if !found {
+			c.Ui.Error(fmt.Sprintf("Could not load builder: %s", pluginName))
+		}
+		server.RegisterBuilder(builder)
+	} else if pluginType == "provisioner" {
+		provisioner, found := Provisioners[pluginName]
+		if !found {
+			c.Ui.Error(fmt.Sprintf("Could not load provisioner: %s", pluginName))
+		}
+		server.RegisterProvisioner(provisioner)
+	} else if pluginType == "post-processor" {
+		postProcessor, found := PostProcessors[pluginName]
+		if !found {
+			c.Ui.Error(fmt.Sprintf("Could not load post-processor: %s", pluginName))
+		}
+		server.RegisterPostProcessor(postProcessor)
+	}
+
+	server.Serve()
+
+	return 0
+}
+
+func (*PluginCommand) Help() string {
+	helpText := `
+Usage: packer plugin PLUGIN
+
+  Runs an internally-compiled version of a plugin from the packer binary. Note
+  that this is an internal command and you should not call it yourself.
+`
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *PluginCommand) Synopsis() string {
+	return "call an internal plugin"
+}
diff --git a/commands.go b/commands.go
index 510250721..21ed30df9 100644
--- a/commands.go
+++ b/commands.go
@@ -59,6 +59,12 @@ func init() {
 			CheckFunc: commandVersionCheck,
 		}, nil
 	},
+
+	"plugin": func() (cli.Command, error) {
+		return &command.PluginCommand{
+			Meta: *CommandMeta,
+		}, nil
+	},
 }
 }
diff --git a/config.go b/config.go
index efb4e7d31..004315489 100644
--- a/config.go
+++ b/config.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"log"
 	"os/exec"
@@ -10,6 +11,7 @@ import (
 	"strings"
 
 	"github.com/mitchellh/osext"
+	"github.com/mitchellh/packer/command"
 	"github.com/mitchellh/packer/packer"
 	"github.com/mitchellh/packer/packer/plugin"
 )
@@ -73,11 +75,17 @@ func (c *config) Discover() error {
 		}
 	}
 
-	// Last, look in the CWD.
+	// Next, look in the CWD.
 	if err := c.discover("."); err != nil {
 		return err
 	}
 
+	// Finally, try to use an internal plugin. Note that this will not override
+ if err := c.discoverInternal(); err != nil { + return err + } + return nil } @@ -196,6 +204,41 @@ func (c *config) discoverSingle(glob string, m *map[string]string) error { return nil } +func (c *config) discoverInternal() error { + // Get the packer binary path + packerPath, err := osext.Executable() + if err != nil { + log.Printf("[ERR] Error loading exe directory: %s", err) + return err + } + + for builder := range command.Builders { + _, found := (c.Builders)[builder] + if !found { + log.Printf("Using internal plugin for %s", builder) + (c.Builders)[builder] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-builder-%s", packerPath, builder) + } + } + + for provisioner := range command.Provisioners { + _, found := (c.Provisioners)[provisioner] + if !found { + log.Printf("Using internal plugin for %s", provisioner) + (c.Provisioners)[provisioner] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-provisioner-%s", packerPath, provisioner) + } + } + + for postProcessor := range command.PostProcessors { + _, found := (c.PostProcessors)[postProcessor] + if !found { + log.Printf("Using internal plugin for %s", postProcessor) + (c.PostProcessors)[postProcessor] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-post-processor-%s", packerPath, postProcessor) + } + } + + return nil +} + func (c *config) pluginClient(path string) *plugin.Client { originalPath := path @@ -214,6 +257,14 @@ func (c *config) pluginClient(path string) *plugin.Client { } } + // Check for special case using `packer plugin PLUGIN` + args := []string{} + if strings.Contains(path, "-PACKERSPACE-") { + parts := strings.Split(path, "-PACKERSPACE-") + path = parts[0] + args = parts[1:] + } + // If everything failed, just use the original path and let the error // bubble through. if path == "" { @@ -222,7 +273,7 @@ func (c *config) pluginClient(path string) *plugin.Client { log.Printf("Creating plugin client for path: %s", path) var config plugin.ClientConfig - config.Cmd = exec.Command(path) + config.Cmd = exec.Command(path, args...) 
config.Managed = true config.MinPort = c.PluginMinPort config.MaxPort = c.PluginMaxPort From 9fa93712a1e18a44c54b431c981dc2bde7cc7388 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 17 Aug 2015 01:32:45 -0700 Subject: [PATCH 868/956] Added integrated post-processors --- command/plugin.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/command/plugin.go b/command/plugin.go index 76e97484e..1685db188 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -24,6 +24,16 @@ import ( vmwarevmx "github.com/mitchellh/packer/builder/vmware/vmx" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer/plugin" + "github.com/mitchellh/packer/post-processor/artifice" + "github.com/mitchellh/packer/post-processor/atlas" + "github.com/mitchellh/packer/post-processor/compress" + "github.com/mitchellh/packer/post-processor/docker-import" + "github.com/mitchellh/packer/post-processor/docker-push" + "github.com/mitchellh/packer/post-processor/docker-save" + "github.com/mitchellh/packer/post-processor/docker-tag" + "github.com/mitchellh/packer/post-processor/vagrant" + "github.com/mitchellh/packer/post-processor/vagrant-cloud" + "github.com/mitchellh/packer/post-processor/vsphere" "github.com/mitchellh/packer/provisioner/ansible-local" "github.com/mitchellh/packer/provisioner/chef-client" "github.com/mitchellh/packer/provisioner/chef-solo" @@ -76,7 +86,18 @@ var Provisioners = map[string]packer.Provisioner{ "windows-shell": new(windowsshell.Provisioner), } -var PostProcessors = map[string]packer.PostProcessor{} +var PostProcessors = map[string]packer.PostProcessor{ + "artifice": new(artifice.PostProcessor), + "atlas": new(atlas.PostProcessor), + "compress": new(compress.PostProcessor), + "docker-import": new(dockerimport.PostProcessor), + "docker-push": new(dockerpush.PostProcessor), + "docker-save": new(dockersave.PostProcessor), + "docker-tag": new(dockertag.PostProcessor), + "vagrant": new(vagrant.PostProcessor), + "vagrant-cloud": new(vagrantcloud.PostProcessor), + "vsphere": new(vsphere.PostProcessor), +} func (c *PluginCommand) Run(args []string) int { // This is an internal call so we're not going to do much error checking. 
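Two naming conventions carry the internal-plugin mechanism above, and both are easy to lose in the diff. A condensed, runnable sketch of just those rules — the binary path and plugin names are illustrative only:

package main

import (
	"fmt"
	"strings"
)

const packerspace = "-PACKERSPACE-"

// splitInternal mirrors pluginClient in config.go: a discovered internal
// plugin is stored as one string, e.g.
// "/usr/local/bin/packer-PACKERSPACE-plugin-PACKERSPACE-packer-builder-amazon-ebs",
// and split back at launch time into the executable path plus the args
// "plugin packer-builder-amazon-ebs".
func splitInternal(path string) (exe string, args []string) {
	if !strings.Contains(path, packerspace) {
		return path, nil
	}
	parts := strings.Split(path, packerspace)
	return parts[0], parts[1:]
}

// parsePluginName mirrors the dash-splitting in PluginCommand.Run. Note
// that the committed code joins parts[1:2], which yields just "post", so
// the "post-processor" comparison in the dispatch never matches as
// written; parts[1:3] gives the presumably intended value, and a later
// patch in this series replaces the splitting with a regexp anyway.
func parsePluginName(arg string) (pluginType, pluginName string) {
	parts := strings.Split(arg, "-")
	pluginType = parts[1]
	if pluginType == "post" {
		pluginType = strings.Join(parts[1:3], "-")
		pluginName = strings.Join(parts[3:], "-")
	} else {
		pluginName = strings.Join(parts[2:], "-")
	}
	return pluginType, pluginName
}

func main() {
	fmt.Println(splitInternal("/usr/local/bin/packer" + packerspace + "plugin" + packerspace + "packer-builder-amazon-ebs"))
	fmt.Println(parsePluginName("packer-builder-amazon-ebs"))
	fmt.Println(parsePluginName("packer-post-processor-docker-import"))
}
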
From e080e73b04fc32017e6606e0760297b589e784a7 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 17 Aug 2015 20:33:50 -0700 Subject: [PATCH 869/956] Add some exit codes and use a constant for -PACKERSPACE- --- command/plugin.go | 16 ++++++++++------ config.go | 21 +++++++++++++++------ 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/command/plugin.go b/command/plugin.go index 1685db188..57d6f0299 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -3,7 +3,6 @@ package command import ( "fmt" "log" - "os" "strings" "github.com/mitchellh/packer/builder/amazon/chroot" @@ -105,7 +104,7 @@ func (c *PluginCommand) Run(args []string) int { log.Printf("args: %#v", args) if len(args) != 1 { c.Ui.Error("Wrong number of args") - os.Exit(1) + return 1 } // Plugin should be called like "packer-builder-amazon-ebs" so we'll take it @@ -124,25 +123,29 @@ func (c *PluginCommand) Run(args []string) int { server, err := plugin.Server() if err != nil { - panic(err) + c.Ui.Error(fmt.Sprintf("Error starting plugin server: %s", err)) + return 1 } if pluginType == "builder" { builder, found := Builders[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load builder: %s", pluginName)) + return 1 } server.RegisterBuilder(builder) } else if pluginType == "provisioner" { provisioner, found := Provisioners[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load provisioner: %s", pluginName)) + return 1 } server.RegisterProvisioner(provisioner) } else if pluginType == "post-processor" { postProcessor, found := PostProcessors[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load post-processor: %s", pluginName)) + return 1 } server.RegisterPostProcessor(postProcessor) } @@ -156,13 +159,14 @@ func (*PluginCommand) Help() string { helpText := ` Usage: packer plugin PLUGIN - Runs an internally-compiled version of a plugin from the packer binary. Note - that this is an internal command and you should not call it yourself. + Runs an internally-compiled version of a plugin from the packer binary. + + NOTE: this is an internal command and you should not call it yourself. ` return strings.TrimSpace(helpText) } func (c *PluginCommand) Synopsis() string { - return "call an internal plugin" + return "internal plugin command" } diff --git a/config.go b/config.go index 004315489..a122b120d 100644 --- a/config.go +++ b/config.go @@ -16,6 +16,10 @@ import ( "github.com/mitchellh/packer/packer/plugin" ) +// PACKERSPACE is used to represent the spaces that separate args for a command +// without being confused with spaces in the path to the command itself. +const PACKERSPACE = "-PACKERSPACE-" + type config struct { DisableCheckpoint bool `json:"disable_checkpoint"` DisableCheckpointSignature bool `json:"disable_checkpoint_signature"` @@ -80,7 +84,7 @@ func (c *config) Discover() error { return err } - // Finally, try to use an internal plugin. Note that this will not Override + // Finally, try to use an internal plugin. Note that this will not override // any previously-loaded plugins. 
if err := c.discoverInternal(); err != nil { return err @@ -216,7 +220,8 @@ func (c *config) discoverInternal() error { _, found := (c.Builders)[builder] if !found { log.Printf("Using internal plugin for %s", builder) - (c.Builders)[builder] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-builder-%s", packerPath, builder) + (c.Builders)[builder] = fmt.Sprintf("%s%splugin%spacker-builder-%s", + packerPath, PACKERSPACE, PACKERSPACE, builder) } } @@ -224,7 +229,9 @@ func (c *config) discoverInternal() error { _, found := (c.Provisioners)[provisioner] if !found { log.Printf("Using internal plugin for %s", provisioner) - (c.Provisioners)[provisioner] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-provisioner-%s", packerPath, provisioner) + (c.Provisioners)[provisioner] = fmt.Sprintf( + "%s%splugin%spacker-provisioner-%s", + packerPath, PACKERSPACE, PACKERSPACE, provisioner) } } @@ -232,7 +239,9 @@ func (c *config) discoverInternal() error { _, found := (c.PostProcessors)[postProcessor] if !found { log.Printf("Using internal plugin for %s", postProcessor) - (c.PostProcessors)[postProcessor] = fmt.Sprintf("%s-PACKERSPACE-plugin-PACKERSPACE-packer-post-processor-%s", packerPath, postProcessor) + (c.PostProcessors)[postProcessor] = fmt.Sprintf( + "%s%splugin%spacker-post-processor-%s", + packerPath, PACKERSPACE, PACKERSPACE, postProcessor) } } @@ -259,8 +268,8 @@ func (c *config) pluginClient(path string) *plugin.Client { // Check for special case using `packer plugin PLUGIN` args := []string{} - if strings.Contains(path, "-PACKERSPACE-") { - parts := strings.Split(path, "-PACKERSPACE-") + if strings.Contains(path, PACKERSPACE) { + parts := strings.Split(path, PACKERSPACE) path = parts[0] args = parts[1:] } From 1641a5e0cb862dffb61d9d3a6bd669a91ebed47a Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 13:46:54 -0700 Subject: [PATCH 870/956] Replace string splitting with a regexp; this is clearer and less code because of the post-processor case --- command/plugin.go | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/command/plugin.go b/command/plugin.go index 57d6f0299..60776fc54 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -3,6 +3,7 @@ package command import ( "fmt" "log" + "regexp" "strings" "github.com/mitchellh/packer/builder/amazon/chroot" @@ -98,28 +99,26 @@ var PostProcessors = map[string]packer.PostProcessor{ "vsphere": new(vsphere.PostProcessor), } +var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner)-(.+)") + func (c *PluginCommand) Run(args []string) int { - // This is an internal call so we're not going to do much error checking. - // If there's a problem we'll usually just crash. + // This is an internal call (users should not call this directly) so we're + // not going to do much input validation. If there's a problem we'll often + // just crash. Error handling should be added to facilitate debugging. log.Printf("args: %#v", args) if len(args) != 1 { c.Ui.Error("Wrong number of args") return 1 } - // Plugin should be called like "packer-builder-amazon-ebs" so we'll take it - // apart. - parts := strings.Split(args[0], "-") - pluginType := parts[1] - pluginName := "" - // Post-processor is split so we'll so some magic here. We could use a - // regexp but this is simpler. 
- if pluginType == "post" { - pluginType = strings.Join(parts[1:2], "-") - pluginName = strings.Join(parts[3:], "-") - } else { - pluginName = strings.Join(parts[2:], "-") + // Plugin will match something like "packer-builder-amazon-ebs" + parts := pluginRegexp.FindStringSubmatch(args[0]) + if len(parts) != 3 { + c.Ui.Error(fmt.Sprintf("Error parsing plugin argument [DEBUG]: %#v", parts)) + return 1 } + pluginType := parts[1] // capture group 1 (builder|post-processor|provisioner) + pluginName := parts[2] // capture group 2 (.+) server, err := plugin.Server() if err != nil { From bfe5b5b4b6d09995da7f7c3f6f6cdb14295fa9d0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 18 Aug 2015 15:21:11 -0700 Subject: [PATCH 871/956] Change if/else logic to switch --- command/plugin.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/command/plugin.go b/command/plugin.go index 60776fc54..91a8fe2b4 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -126,21 +126,22 @@ func (c *PluginCommand) Run(args []string) int { return 1 } - if pluginType == "builder" { + switch pluginType { + case "builder": builder, found := Builders[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load builder: %s", pluginName)) return 1 } server.RegisterBuilder(builder) - } else if pluginType == "provisioner" { + case "provisioner": provisioner, found := Provisioners[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load provisioner: %s", pluginName)) return 1 } server.RegisterProvisioner(provisioner) - } else if pluginType == "post-processor" { + case "post-processor": postProcessor, found := PostProcessors[pluginName] if !found { c.Ui.Error(fmt.Sprintf("Could not load post-processor: %s", pluginName)) From 7acdc1b6afee2446769c583e553d64e78ef2b466 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 20 Aug 2015 16:43:30 -0700 Subject: [PATCH 872/956] Hide the plugin command from help output --- main.go | 23 ++++++++++++++++++++++- main_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/main.go b/main.go index a0d3190d1..ac61d211b 100644 --- a/main.go +++ b/main.go @@ -181,7 +181,7 @@ func wrappedMain() int { cli := &cli.CLI{ Args: args, Commands: Commands, - HelpFunc: cli.BasicHelpFunc("packer"), + HelpFunc: excludeHelpFunc(Commands, []string{"plugin"}), HelpWriter: os.Stdout, Version: Version, } @@ -195,6 +195,27 @@ func wrappedMain() int { return exitCode } +// excludeHelpFunc filters commands we don't want to show from the list of +// commands displayed in packer's help text. +func excludeHelpFunc(commands map[string]cli.CommandFactory, exclude []string) cli.HelpFunc { + // Make search slice into a map so we can use use the `if found` idiom + // instead of a nested loop. + var excludes = make(map[string]interface{}, len(exclude)) + for _, item := range exclude { + excludes[item] = nil + } + + // Create filtered list of commands + helpCommands := []string{} + for command := range commands { + if _, found := excludes[command]; !found { + helpCommands = append(helpCommands, command) + } + } + + return cli.FilteredHelpFunc(helpCommands, cli.BasicHelpFunc("packer")) +} + // extractMachineReadable checks the args for the machine readable // flag and returns whether or not it is on. It modifies the args // to remove this flag. 
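The whole trick in excludeHelpFunc is inverting an exclude list into the include list that cli.FilteredHelpFunc expects; the excluded command stays registered and runnable, it just stops appearing in help output. A minimal standalone sketch — the stub command and synopses are hypothetical, while the cli calls are the same ones used above:

package main

import (
	"fmt"

	"github.com/mitchellh/cli"
)

// nop is a stub command so BasicHelpFunc has synopses to render.
type nop struct{ synopsis string }

func (n nop) Help() string     { return n.synopsis }
func (n nop) Run([]string) int { return 0 }
func (n nop) Synopsis() string { return n.synopsis }

func factory(s string) cli.CommandFactory {
	return func() (cli.Command, error) { return nop{s}, nil }
}

func main() {
	commands := map[string]cli.CommandFactory{
		"build":  factory("build image(s) from template"),
		"plugin": factory("internal plugin command"),
	}

	// Invert: everything not excluded becomes the include list.
	include := []string{}
	for name := range commands {
		if name != "plugin" {
			include = append(include, name)
		}
	}

	help := cli.FilteredHelpFunc(include, cli.BasicHelpFunc("packer"))
	fmt.Println(help(commands)) // lists "build" but not "plugin"
}

The test added in main_test.go below exercises the same inversion through excludeHelpFunc itself.
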
diff --git a/main_test.go b/main_test.go index 7a14bed19..2b6686b38 100644 --- a/main_test.go +++ b/main_test.go @@ -3,9 +3,36 @@ package main import ( "math/rand" "reflect" + "strings" "testing" + + "github.com/mitchellh/cli" + "github.com/mitchellh/packer/command" ) +func TestExcludeHelpFunc(t *testing.T) { + commands := map[string]cli.CommandFactory{ + "build": func() (cli.Command, error) { + return &command.BuildCommand{ + Meta: command.Meta{}, + }, nil + }, + + "fix": func() (cli.Command, error) { + return &command.FixCommand{ + Meta: command.Meta{}, + }, nil + }, + } + + helpFunc := excludeHelpFunc(commands, []string{"fix"}) + helpText := helpFunc(commands) + + if strings.Contains(helpText, "fix") { + t.Fatal("Found fix in help text even though we excluded it: \n\n%s\n\n", helpText) + } +} + func TestExtractMachineReadable(t *testing.T) { var args, expected, result []string var mr bool From 95797d7a8ab9c00ec39fc3a3eaf9f9e8052c586e Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 12 Oct 2015 19:14:01 -0700 Subject: [PATCH 873/956] Fatal -> Fatalf since we have a format string --- main_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main_test.go b/main_test.go index 2b6686b38..0bc7ba96f 100644 --- a/main_test.go +++ b/main_test.go @@ -29,7 +29,7 @@ func TestExcludeHelpFunc(t *testing.T) { helpText := helpFunc(commands) if strings.Contains(helpText, "fix") { - t.Fatal("Found fix in help text even though we excluded it: \n\n%s\n\n", helpText) + t.Fatalf("Found fix in help text even though we excluded it: \n\n%s\n\n", helpText) } } From 6783bc3fb0776a1cb23c6b092b86f1bf47692745 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 13 Oct 2015 02:13:49 -0700 Subject: [PATCH 874/956] Added generator for command/plugin.go so we don't have to edit it by hand to add new plugins --- command/plugin.go | 159 +++++++++--------- command/version.go | 2 + scripts/generate-plugins.go | 325 ++++++++++++++++++++++++++++++++++++ 3 files changed, 411 insertions(+), 75 deletions(-) create mode 100644 scripts/generate-plugins.go diff --git a/command/plugin.go b/command/plugin.go index 91a8fe2b4..cf213b3ef 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -1,3 +1,7 @@ +// +// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! 
+// + package command import ( @@ -6,46 +10,48 @@ import ( "regexp" "strings" - "github.com/mitchellh/packer/builder/amazon/chroot" - "github.com/mitchellh/packer/builder/amazon/ebs" - "github.com/mitchellh/packer/builder/amazon/instance" - "github.com/mitchellh/packer/builder/digitalocean" - "github.com/mitchellh/packer/builder/docker" - filebuilder "github.com/mitchellh/packer/builder/file" - "github.com/mitchellh/packer/builder/googlecompute" - "github.com/mitchellh/packer/builder/null" - "github.com/mitchellh/packer/builder/openstack" - parallelsiso "github.com/mitchellh/packer/builder/parallels/iso" - parallelspvm "github.com/mitchellh/packer/builder/parallels/pvm" - "github.com/mitchellh/packer/builder/qemu" - virtualboxiso "github.com/mitchellh/packer/builder/virtualbox/iso" - virtualboxovf "github.com/mitchellh/packer/builder/virtualbox/ovf" - vmwareiso "github.com/mitchellh/packer/builder/vmware/iso" - vmwarevmx "github.com/mitchellh/packer/builder/vmware/vmx" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer/plugin" - "github.com/mitchellh/packer/post-processor/artifice" - "github.com/mitchellh/packer/post-processor/atlas" - "github.com/mitchellh/packer/post-processor/compress" - "github.com/mitchellh/packer/post-processor/docker-import" - "github.com/mitchellh/packer/post-processor/docker-push" - "github.com/mitchellh/packer/post-processor/docker-save" - "github.com/mitchellh/packer/post-processor/docker-tag" - "github.com/mitchellh/packer/post-processor/vagrant" - "github.com/mitchellh/packer/post-processor/vagrant-cloud" - "github.com/mitchellh/packer/post-processor/vsphere" - "github.com/mitchellh/packer/provisioner/ansible-local" - "github.com/mitchellh/packer/provisioner/chef-client" - "github.com/mitchellh/packer/provisioner/chef-solo" + + amazonchrootbuilder "github.com/mitchellh/packer/builder/amazon/chroot" + amazonebsbuilder "github.com/mitchellh/packer/builder/amazon/ebs" + amazoninstancebuilder "github.com/mitchellh/packer/builder/amazon/instance" + ansiblelocalprovisioner "github.com/mitchellh/packer/provisioner/ansible-local" + artificepostprocessor "github.com/mitchellh/packer/post-processor/artifice" + atlaspostprocessor "github.com/mitchellh/packer/post-processor/atlas" + chefclientprovisioner "github.com/mitchellh/packer/provisioner/chef-client" + chefsoloprovisioner "github.com/mitchellh/packer/provisioner/chef-solo" + compresspostprocessor "github.com/mitchellh/packer/post-processor/compress" + digitaloceanbuilder "github.com/mitchellh/packer/builder/digitalocean" + dockerbuilder "github.com/mitchellh/packer/builder/docker" + dockerimportpostprocessor "github.com/mitchellh/packer/post-processor/docker-import" + dockerpushpostprocessor "github.com/mitchellh/packer/post-processor/docker-push" + dockersavepostprocessor "github.com/mitchellh/packer/post-processor/docker-save" + dockertagpostprocessor "github.com/mitchellh/packer/post-processor/docker-tag" + filebuilder "github.com/mitchellh/packer/builder/file" fileprovisioner "github.com/mitchellh/packer/provisioner/file" - "github.com/mitchellh/packer/provisioner/powershell" - "github.com/mitchellh/packer/provisioner/puppet-masterless" - "github.com/mitchellh/packer/provisioner/puppet-server" - "github.com/mitchellh/packer/provisioner/salt-masterless" - "github.com/mitchellh/packer/provisioner/shell" - shelllocal "github.com/mitchellh/packer/provisioner/shell-local" - "github.com/mitchellh/packer/provisioner/windows-restart" - windowsshell 
"github.com/mitchellh/packer/provisioner/windows-shell" + googlecomputebuilder "github.com/mitchellh/packer/builder/googlecompute" + nullbuilder "github.com/mitchellh/packer/builder/null" + openstackbuilder "github.com/mitchellh/packer/builder/openstack" + parallelsisobuilder "github.com/mitchellh/packer/builder/parallels/iso" + parallelspvmbuilder "github.com/mitchellh/packer/builder/parallels/pvm" + powershellprovisioner "github.com/mitchellh/packer/provisioner/powershell" + puppetmasterlessprovisioner "github.com/mitchellh/packer/provisioner/puppet-masterless" + puppetserverprovisioner "github.com/mitchellh/packer/provisioner/puppet-server" + qemubuilder "github.com/mitchellh/packer/builder/qemu" + saltmasterlessprovisioner "github.com/mitchellh/packer/provisioner/salt-masterless" + shelllocalprovisioner "github.com/mitchellh/packer/provisioner/shell-local" + shellprovisioner "github.com/mitchellh/packer/provisioner/shell" + vagrantcloudpostprocessor "github.com/mitchellh/packer/post-processor/vagrant-cloud" + vagrantpostprocessor "github.com/mitchellh/packer/post-processor/vagrant" + virtualboxisobuilder "github.com/mitchellh/packer/builder/virtualbox/iso" + virtualboxovfbuilder "github.com/mitchellh/packer/builder/virtualbox/ovf" + vmwareisobuilder "github.com/mitchellh/packer/builder/vmware/iso" + vmwarevmxbuilder "github.com/mitchellh/packer/builder/vmware/vmx" + vspherepostprocessor "github.com/mitchellh/packer/post-processor/vsphere" + windowsrestartprovisioner "github.com/mitchellh/packer/provisioner/windows-restart" + windowsshellprovisioner "github.com/mitchellh/packer/provisioner/windows-shell" + ) type PluginCommand struct { @@ -53,52 +59,55 @@ type PluginCommand struct { } var Builders = map[string]packer.Builder{ - "amazon-chroot": new(chroot.Builder), - "amazon-ebs": new(ebs.Builder), - "amazon-instance": new(instance.Builder), - "digitalocean": new(digitalocean.Builder), - "docker": new(docker.Builder), - "file": new(filebuilder.Builder), - "googlecompute": new(googlecompute.Builder), - "null": new(null.Builder), - "openstack": new(openstack.Builder), - "parallels-iso": new(parallelsiso.Builder), - "parallels-pvm": new(parallelspvm.Builder), - "qemu": new(qemu.Builder), - "virtualbox-iso": new(virtualboxiso.Builder), - "virtualbox-ovf": new(virtualboxovf.Builder), - "vmware-iso": new(vmwareiso.Builder), - "vmware-vmx": new(vmwarevmx.Builder), + "amazon-chroot": new(amazonchrootbuilder.Builder), + "amazon-ebs": new(amazonebsbuilder.Builder), + "amazon-instance": new(amazoninstancebuilder.Builder), + "digitalocean": new(digitaloceanbuilder.Builder), + "docker": new(dockerbuilder.Builder), + "file": new(filebuilder.Builder), + "googlecompute": new(googlecomputebuilder.Builder), + "null": new(nullbuilder.Builder), + "openstack": new(openstackbuilder.Builder), + "parallels-iso": new(parallelsisobuilder.Builder), + "parallels-pvm": new(parallelspvmbuilder.Builder), + "qemu": new(qemubuilder.Builder), + "virtualbox-iso": new(virtualboxisobuilder.Builder), + "virtualbox-ovf": new(virtualboxovfbuilder.Builder), + "vmware-iso": new(vmwareisobuilder.Builder), + "vmware-vmx": new(vmwarevmxbuilder.Builder), } + var Provisioners = map[string]packer.Provisioner{ - "ansible-local": new(ansiblelocal.Provisioner), - "chef-client": new(chefclient.Provisioner), - "chef-solo": new(chefsolo.Provisioner), - "file": new(fileprovisioner.Provisioner), - "powershell": new(powershell.Provisioner), - "puppet-masterless": new(puppetmasterless.Provisioner), - "puppet-server": 
new(puppetserver.Provisioner), - "salt-masterless": new(saltmasterless.Provisioner), - "shell": new(shell.Provisioner), - "shell-local": new(shelllocal.Provisioner), - "windows-restart": new(restart.Provisioner), - "windows-shell": new(windowsshell.Provisioner), + "ansible-local": new(ansiblelocalprovisioner.Provisioner), + "chef-client": new(chefclientprovisioner.Provisioner), + "chef-solo": new(chefsoloprovisioner.Provisioner), + "file": new(fileprovisioner.Provisioner), + "powershell": new(powershellprovisioner.Provisioner), + "puppet-masterless": new(puppetmasterlessprovisioner.Provisioner), + "puppet-server": new(puppetserverprovisioner.Provisioner), + "salt-masterless": new(saltmasterlessprovisioner.Provisioner), + "shell": new(shellprovisioner.Provisioner), + "shell-local": new(shelllocalprovisioner.Provisioner), + "windows-restart": new(windowsrestartprovisioner.Provisioner), + "windows-shell": new(windowsshellprovisioner.Provisioner), } + var PostProcessors = map[string]packer.PostProcessor{ - "artifice": new(artifice.PostProcessor), - "atlas": new(atlas.PostProcessor), - "compress": new(compress.PostProcessor), - "docker-import": new(dockerimport.PostProcessor), - "docker-push": new(dockerpush.PostProcessor), - "docker-save": new(dockersave.PostProcessor), - "docker-tag": new(dockertag.PostProcessor), - "vagrant": new(vagrant.PostProcessor), - "vagrant-cloud": new(vagrantcloud.PostProcessor), - "vsphere": new(vsphere.PostProcessor), + "artifice": new(artificepostprocessor.PostProcessor), + "atlas": new(atlaspostprocessor.PostProcessor), + "compress": new(compresspostprocessor.PostProcessor), + "docker-import": new(dockerimportpostprocessor.PostProcessor), + "docker-push": new(dockerpushpostprocessor.PostProcessor), + "docker-save": new(dockersavepostprocessor.PostProcessor), + "docker-tag": new(dockertagpostprocessor.PostProcessor), + "vagrant": new(vagrantpostprocessor.PostProcessor), + "vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor), + "vsphere": new(vspherepostprocessor.PostProcessor), } + var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner)-(.+)") func (c *PluginCommand) Run(args []string) int { diff --git a/command/version.go b/command/version.go index cd170f2df..f5898e77d 100644 --- a/command/version.go +++ b/command/version.go @@ -1,5 +1,7 @@ package command +//go:generate go run ../scripts/generate-plugins.go + import ( "bytes" "fmt" diff --git a/scripts/generate-plugins.go b/scripts/generate-plugins.go new file mode 100644 index 000000000..62828f96b --- /dev/null +++ b/scripts/generate-plugins.go @@ -0,0 +1,325 @@ +// Generate Plugins is a small program that updates the lists of plugins in +// command/plugin.go so they will be compiled into the main packer binary. +// +// See https://github.com/mitchellh/packer/pull/2608 for details. 
+package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" +) + +const target = "command/plugin.go" + +func main() { + wd, _ := os.Getwd() + if filepath.Base(wd) != "packer" { + os.Chdir("..") + wd, _ = os.Getwd() + if filepath.Base(wd) != "packer" { + log.Fatalf("This program must be invoked in the packer project root; in %s", wd) + } + } + + builders, err := discoverBuilders() + if err != nil { + log.Fatalf("Failed to discover builders: %s", err) + } + + provisioners, _ := discoverProvisioners() + if err != nil { + log.Fatalf("Failed to discover provisioners: %s", err) + } + + postProcessors, _ := discoverPostProcessors() + if err != nil { + log.Fatalf("Failed to discover post processors: %s", err) + } + + output := source + output = strings.Replace(output, "IMPORTS", makeImports(builders, provisioners, postProcessors), 1) + output = strings.Replace(output, "BUILDERS", makeMap("Builders", "Builder", builders), 1) + output = strings.Replace(output, "PROVISIONERS", makeMap("Provisioners", "Provisioner", provisioners), 1) + output = strings.Replace(output, "POSTPROCESSORS", makeMap("PostProcessors", "PostProcessor", postProcessors), 1) + + // TODO sort the lists of plugins so we are not subjected to random OS ordering of the plugin lists + // TODO format the file + + file, err := os.Create(target) + if err != nil { + log.Fatalf("Failed to open %s for writing: %s", target, err) + } + file.WriteString(output) + file.Close() + + log.Printf("Generated %s", target) +} + +type plugin struct { + Package string + PluginName string + TypeName string + Path string + ImportName string +} + +// makeMap creates a map named Name with type packer.Name that looks something +// like this: +// +// var Builders = map[string]packer.Builder{ +// "amazon-chroot": new(chroot.Builder), +// "amazon-ebs": new(ebs.Builder), +// "amazon-instance": new(instance.Builder), +func makeMap(varName, varType string, items []plugin) string { + output := "" + + output += fmt.Sprintf("var %s = map[string]packer.%s{\n", varName, varType) + for _, item := range items { + output += fmt.Sprintf("\t\"%s\": new(%s.%s),\n", item.PluginName, item.ImportName, item.TypeName) + } + output += "}\n" + return output +} + +func makeImports(builders, provisioners, postProcessors []plugin) string { + plugins := []string{} + + for _, builder := range builders { + plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/mitchellh/packer/%s\"\n", builder.ImportName, builder.Path)) + } + + for _, provisioner := range provisioners { + plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/mitchellh/packer/%s\"\n", provisioner.ImportName, provisioner.Path)) + } + + for _, postProcessor := range postProcessors { + plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/mitchellh/packer/%s\"\n", postProcessor.ImportName, postProcessor.Path)) + } + + // Make things pretty + sort.Strings(plugins) + + return strings.Join(plugins, "") +} + +// listDirectories recursively lists directories under the specified path +func listDirectories(path string) ([]string, error) { + names := []string{} + items, err := ioutil.ReadDir(path) + if err != nil { + return names, err + } + + for _, item := range items { + // We only want directories + if item.IsDir() { + currentDir := filepath.Join(path, item.Name()) + names = append(names, currentDir) + + // Do some recursion + subNames, err := listDirectories(currentDir) + if err == nil { + names = append(names, subNames...) 
+ } + } + } + + return names, nil +} + +// deriveName determines the name of the plugin (what you'll see in a packer +// template) based on the filesystem path. We use two rules: +// +// Start with -> builder/virtualbox/iso +// +// 1. Strip the root -> virtualbox/iso +// 2. Switch slash / to dash - -> virtualbox-iso +func deriveName(root, full string) string { + short, _ := filepath.Rel(root, full) + bits := strings.Split(short, string(os.PathSeparator)) + return strings.Join(bits, "-") +} + +// deriveImport will build a unique import identifier based on packageName and +// the result of deriveName() +// +// This will be something like -> virtualboxisobuilder +// +// Which is long, but deterministic and unique. +func deriveImport(typeName, derivedName string) string { + return strings.Replace(derivedName, "-", "", -1) + strings.ToLower(typeName) +} + +// discoverTypesInPath searches for types of typeID in path and returns a list +// of plugins it finds. +func discoverTypesInPath(path, typeID string) ([]plugin, error) { + postProcessors := []plugin{} + + dirs, err := listDirectories(path) + if err != nil { + return postProcessors, err + } + + for _, dir := range dirs { + fset := token.NewFileSet() + goPackages, err := parser.ParseDir(fset, dir, nil, parser.AllErrors) + if err != nil { + return postProcessors, fmt.Errorf("Failed parsing directory %s: %s", dir, err) + } + + for _, goPackage := range goPackages { + ast.PackageExports(goPackage) + ast.Inspect(goPackage, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.TypeSpec: + if x.Name.Name == typeID { + derivedName := deriveName(path, dir) + postProcessors = append(postProcessors, plugin{ + Package: goPackage.Name, + PluginName: derivedName, + ImportName: deriveImport(x.Name.Name, derivedName), + TypeName: x.Name.Name, + Path: dir, + }) + // The AST stops parsing when we return false. Once we + // find the symbol we want we can stop parsing. + + // DEBUG: + // fmt.Printf("package %#v\n", goPackage) + return false + } + } + return true + }) + } + } + + return postProcessors, nil +} + +func discoverBuilders() ([]plugin, error) { + path := "./builder" + typeID := "Builder" + return discoverTypesInPath(path, typeID) +} + +func discoverProvisioners() ([]plugin, error) { + path := "./provisioner" + typeID := "Provisioner" + return discoverTypesInPath(path, typeID) +} + +func discoverPostProcessors() ([]plugin, error) { + path := "./post-processor" + typeID := "PostProcessor" + return discoverTypesInPath(path, typeID) +} + +const source = `// +// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! +// + +package command + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/packer/plugin" + +IMPORTS +) + +type PluginCommand struct { + Meta +} + +BUILDERS + +PROVISIONERS + +POSTPROCESSORS + +var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner)-(.+)") + +func (c *PluginCommand) Run(args []string) int { + // This is an internal call (users should not call this directly) so we're + // not going to do much input validation. If there's a problem we'll often + // just crash. Error handling should be added to facilitate debugging. 
+ log.Printf("args: %#v", args) + if len(args) != 1 { + c.Ui.Error("Wrong number of args") + return 1 + } + + // Plugin will match something like "packer-builder-amazon-ebs" + parts := pluginRegexp.FindStringSubmatch(args[0]) + if len(parts) != 3 { + c.Ui.Error(fmt.Sprintf("Error parsing plugin argument [DEBUG]: %#v", parts)) + return 1 + } + pluginType := parts[1] // capture group 1 (builder|post-processor|provisioner) + pluginName := parts[2] // capture group 2 (.+) + + server, err := plugin.Server() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error starting plugin server: %s", err)) + return 1 + } + + switch pluginType { + case "builder": + builder, found := Builders[pluginName] + if !found { + c.Ui.Error(fmt.Sprintf("Could not load builder: %s", pluginName)) + return 1 + } + server.RegisterBuilder(builder) + case "provisioner": + provisioner, found := Provisioners[pluginName] + if !found { + c.Ui.Error(fmt.Sprintf("Could not load provisioner: %s", pluginName)) + return 1 + } + server.RegisterProvisioner(provisioner) + case "post-processor": + postProcessor, found := PostProcessors[pluginName] + if !found { + c.Ui.Error(fmt.Sprintf("Could not load post-processor: %s", pluginName)) + return 1 + } + server.RegisterPostProcessor(postProcessor) + } + + server.Serve() + + return 0 +} + +func (*PluginCommand) Help() string { + helpText := ` + "`" + ` +Usage: packer plugin PLUGIN + + Runs an internally-compiled version of a plugin from the packer binary. + + NOTE: this is an internal command and you should not call it yourself. +` + "`" + ` + + return strings.TrimSpace(helpText) +} + +func (c *PluginCommand) Synopsis() string { + return "internal plugin command" +} +` From 9c68f039b37b2ad76205a4100f1c9fe8acd0072c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 13 Oct 2015 02:26:07 -0700 Subject: [PATCH 875/956] Don't suppress errors from provisioners or post-processors --- scripts/generate-plugins.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/generate-plugins.go b/scripts/generate-plugins.go index 62828f96b..c0c8fc185 100644 --- a/scripts/generate-plugins.go +++ b/scripts/generate-plugins.go @@ -34,12 +34,12 @@ func main() { log.Fatalf("Failed to discover builders: %s", err) } - provisioners, _ := discoverProvisioners() + provisioners, err := discoverProvisioners() if err != nil { log.Fatalf("Failed to discover provisioners: %s", err) } - postProcessors, _ := discoverPostProcessors() + postProcessors, err := discoverPostProcessors() if err != nil { log.Fatalf("Failed to discover post processors: %s", err) } From eb8a0bf731ef842dad2687da83dbdd499e8c7a2c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 13 Oct 2015 02:38:31 -0700 Subject: [PATCH 876/956] Add some documentation to generate-plugins --- scripts/generate-plugins.go | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/scripts/generate-plugins.go b/scripts/generate-plugins.go index c0c8fc185..2dced41a3 100644 --- a/scripts/generate-plugins.go +++ b/scripts/generate-plugins.go @@ -20,6 +20,9 @@ import ( const target = "command/plugin.go" func main() { + // Normally this is run via go:generate from the command folder so we need + // to cd .. first. But when developing it's easier to use go run, so we'll + // support that too. 
wd, _ := os.Getwd() if filepath.Base(wd) != "packer" { os.Chdir("..") @@ -29,6 +32,7 @@ func main() { } } + // Collect all of the data we need about plugins we have in the project builders, err := discoverBuilders() if err != nil { log.Fatalf("Failed to discover builders: %s", err) @@ -44,6 +48,7 @@ func main() { log.Fatalf("Failed to discover post processors: %s", err) } + // Do some simple code generation and templating output := source output = strings.Replace(output, "IMPORTS", makeImports(builders, provisioners, postProcessors), 1) output = strings.Replace(output, "BUILDERS", makeMap("Builders", "Builder", builders), 1) @@ -53,22 +58,27 @@ func main() { // TODO sort the lists of plugins so we are not subjected to random OS ordering of the plugin lists // TODO format the file + // Write our generated code to the command/plugin.go file file, err := os.Create(target) + defer file.Close() if err != nil { log.Fatalf("Failed to open %s for writing: %s", target, err) } - file.WriteString(output) - file.Close() + + _, err = file.WriteString(output) + if err != nil { + log.Fatalf("Failed writing to %s: %s", target, err) + } log.Printf("Generated %s", target) } type plugin struct { - Package string - PluginName string - TypeName string - Path string - ImportName string + Package string // This plugin's package name (iso) + PluginName string // Name of plugin (vmware-iso) + TypeName string // Type of plugin (builder) + Path string // Path relative to packer root (builder/vmware/iso) + ImportName string // PluginName+TypeName (vmwareisobuilder) } // makeMap creates a map named Name with type packer.Name that looks something From a143f1e0856d0425d43fffa7d82b6b5e59f2c68c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 21 Oct 2015 16:41:58 -0700 Subject: [PATCH 877/956] Updated build.sh so it doesn't build all the plugins separately anymore --- scripts/build.sh | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/scripts/build.sh b/scripts/build.sh index b2e3248e4..dcd9bd7c8 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -42,18 +42,10 @@ gox \ -os="${XC_OS}" \ -arch="${XC_ARCH}" \ -ldflags "-X main.GitCommit ${GIT_COMMIT}${GIT_DIRTY}" \ - -output "pkg/{{.OS}}_{{.Arch}}/packer-{{.Dir}}" \ - ./... + -output "pkg/{{.OS}}_{{.Arch}}/packer" \ + . set -e -# Make sure "packer-packer" is renamed properly -for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do - set +e - mv ${PLATFORM}/packer-packer.exe ${PLATFORM}/packer.exe 2>/dev/null - mv ${PLATFORM}/packer-packer ${PLATFORM}/packer 2>/dev/null - set -e -done - # Move all the compiled things to the $GOPATH/bin GOPATH=${GOPATH:-$(go env GOPATH)} case $(uname) in From 699c6735369990bbc32238ee71b78369e8567994 Mon Sep 17 00:00:00 2001 From: Yuya Kusakabe Date: Fri, 13 Feb 2015 19:13:58 +0900 Subject: [PATCH 878/956] builder/vmware-esxi: Add step_export If `format` option is configured, packer exports the VM with ovftool. website: Document about OVF Tool and `format` option. post-processor/vsphere: Enable to use `mitchellh.vmware-esx` artifact type and OVF and OVA formats, fixes #1457. 
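The heart of the export flow added here is a single ovftool invocation against the remote ESXi host. A condensed sketch of the argument vector that step_export.go (below) assembles — user, password, host, and VM name are placeholders:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildExportArgs mirrors the ovftool command line built by step_export.go:
// export the named VM from the ESXi host into <vmname>/<vmname>.<format>,
// where format is one of "ovf", "ova" or "vmx". The password is URL-escaped
// because it is embedded in the vi:// locator.
func buildExportArgs(user, password, host, vmName, format string) []string {
	return []string{
		"--noSSLVerify=true",
		"--skipManifestCheck",
		"-tt=" + format,
		"vi://" + user + ":" + url.QueryEscape(password) + "@" + host + "/" + vmName,
		vmName + "/" + vmName + "." + format,
	}
}

func main() {
	args := buildExportArgs("root", "s3cret/pw", "esxi.example.com", "packer-test", "ovf")
	fmt.Println("ovftool", strings.Join(args, " "))
}
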
--- builder/vmware/common/step_clean_vmx.go | 1 + builder/vmware/common/step_output_dir.go | 19 +++-- builder/vmware/iso/builder.go | 20 ++++- builder/vmware/iso/driver_esx5.go | 12 +++ builder/vmware/iso/remote_driver.go | 6 ++ builder/vmware/iso/remote_driver_mock.go | 17 +++++ builder/vmware/iso/step_export.go | 74 +++++++++++++++++++ builder/vmware/iso/step_register.go | 28 +++++-- post-processor/vsphere/post-processor.go | 17 +++-- .../docs/builders/vmware-iso.html.markdown | 4 + 10 files changed, 174 insertions(+), 24 deletions(-) create mode 100644 builder/vmware/iso/step_export.go diff --git a/builder/vmware/common/step_clean_vmx.go b/builder/vmware/common/step_clean_vmx.go index e9bc51987..eb67aa14d 100755 --- a/builder/vmware/common/step_clean_vmx.go +++ b/builder/vmware/common/step_clean_vmx.go @@ -54,6 +54,7 @@ func (s StepCleanVMX) Run(state multistep.StateBag) multistep.StepAction { vmxData[ide+"devicetype"] = "cdrom-raw" vmxData[ide+"filename"] = "auto detect" + vmxData[ide+"clientdevice"] = "TRUE" } ui.Message("Disabling VNC server...") diff --git a/builder/vmware/common/step_output_dir.go b/builder/vmware/common/step_output_dir.go index 17f13d5d3..9807296ba 100644 --- a/builder/vmware/common/step_output_dir.go +++ b/builder/vmware/common/step_output_dir.go @@ -60,15 +60,18 @@ func (s *StepOutputDir) Cleanup(state multistep.StateBag) { dir := state.Get("dir").(OutputDir) ui := state.Get("ui").(packer.Ui) - ui.Say("Deleting output directory...") - for i := 0; i < 5; i++ { - err := dir.RemoveAll() - if err == nil { - break - } + exists, _ := dir.DirExists() + if exists { + ui.Say("Deleting output directory...") + for i := 0; i < 5; i++ { + err := dir.RemoveAll() + if err == nil { + break + } - log.Printf("Error removing output dir: %s", err) - time.Sleep(2 * time.Second) + log.Printf("Error removing output dir: %s", err) + time.Sleep(2 * time.Second) + } } } } diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index bac3dbdff..617e1579b 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -40,6 +40,7 @@ type Config struct { DiskSize uint `mapstructure:"disk_size"` DiskTypeId string `mapstructure:"disk_type_id"` FloppyFiles []string `mapstructure:"floppy_files"` + Format string `mapstruture:"format"` GuestOSType string `mapstructure:"guest_os_type"` ISOChecksum string `mapstructure:"iso_checksum"` ISOChecksumType string `mapstructure:"iso_checksum_type"` @@ -235,6 +236,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe default: dir = new(vmwcommon.LocalOutputDir) } + if b.config.RemoteType != "" && b.config.Format != "" { + b.config.OutputDir = b.config.VMName + } dir.SetOutputDir(b.config.OutputDir) // Setup the state bag @@ -289,7 +293,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe VNCPortMin: b.config.VNCPortMin, VNCPortMax: b.config.VNCPortMax, }, - &StepRegister{}, + &StepRegister{ + Format: b.config.Format, + }, &vmwcommon.StepRun{ BootWait: b.config.BootWait, DurationBeforeStop: 5 * time.Second, @@ -328,6 +334,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &vmwcommon.StepCompactDisk{ Skip: b.config.SkipCompaction, }, + &StepExport{ + Format: b.config.Format, + }, } // Run! 
@@ -357,7 +366,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe } // Compile the artifact list - files, err := state.Get("dir").(OutputDir).ListFiles() + var files []string + if b.config.RemoteType != "" { + dir = new(vmwcommon.LocalOutputDir) + dir.SetOutputDir(b.config.OutputDir) + files, err = dir.ListFiles() + } else { + files, err = state.Get("dir").(OutputDir).ListFiles() + } if err != nil { return nil, err } diff --git a/builder/vmware/iso/driver_esx5.go b/builder/vmware/iso/driver_esx5.go index 75d4d3d25..e17dd7f69 100644 --- a/builder/vmware/iso/driver_esx5.go +++ b/builder/vmware/iso/driver_esx5.go @@ -104,6 +104,18 @@ func (d *ESX5Driver) Unregister(vmxPathLocal string) error { return d.sh("vim-cmd", "vmsvc/unregister", d.vmId) } +func (d *ESX5Driver) Destroy() error { + return d.sh("vim-cmd", "vmsvc/destroy", d.vmId) +} + +func (d *ESX5Driver) IsDestroyed() (bool, error) { + err := d.sh("test", "!", "-e", d.outputDir) + if err != nil { + return false, err + } + return true, err +} + func (d *ESX5Driver) UploadISO(localPath string, checksum string, checksumType string) (string, error) { finalPath := d.cachePath(localPath) if err := d.mkdir(filepath.ToSlash(filepath.Dir(finalPath))); err != nil { diff --git a/builder/vmware/iso/remote_driver.go b/builder/vmware/iso/remote_driver.go index 7c62cd4d7..378c949d4 100644 --- a/builder/vmware/iso/remote_driver.go +++ b/builder/vmware/iso/remote_driver.go @@ -18,6 +18,12 @@ type RemoteDriver interface { // Removes a VM from inventory specified by the path to the VMX given. Unregister(string) error + // Destroys a VM + Destroy() error + + // Checks if the VM is destroyed. + IsDestroyed() (bool, error) + // Uploads a local file to remote side. upload(dst, src string) error diff --git a/builder/vmware/iso/remote_driver_mock.go b/builder/vmware/iso/remote_driver_mock.go index 2f4b3ae81..dcd1ba0aa 100644 --- a/builder/vmware/iso/remote_driver_mock.go +++ b/builder/vmware/iso/remote_driver_mock.go @@ -20,6 +20,13 @@ type RemoteDriverMock struct { UnregisterPath string UnregisterErr error + DestroyCalled bool + DestroyErr error + + IsDestroyedCalled bool + IsDestroyedResult bool + IsDestroyedErr error + uploadErr error ReloadVMErr error @@ -43,6 +50,16 @@ func (d *RemoteDriverMock) Unregister(path string) error { return d.UnregisterErr } +func (d *RemoteDriverMock) Destroy() error { + d.DestroyCalled = true + return d.DestroyErr +} + +func (d *RemoteDriverMock) IsDestroyed() (bool, error) { + d.DestroyCalled = true + return d.IsDestroyedResult, d.IsDestroyedErr +} + func (d *RemoteDriverMock) upload(dst, src string) error { return d.uploadErr } diff --git a/builder/vmware/iso/step_export.go b/builder/vmware/iso/step_export.go new file mode 100644 index 000000000..8c1ed1b93 --- /dev/null +++ b/builder/vmware/iso/step_export.go @@ -0,0 +1,74 @@ +package iso + +import ( + "bytes" + "fmt" + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" +) + +type StepExport struct { + Format string +} + +func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { + c := state.Get("config").(*Config) + ui := state.Get("ui").(packer.Ui) + + if c.RemoteType != "esx5" || s.Format == "" { + return multistep.ActionContinue + } + + ovftool := "ovftool" + if runtime.GOOS == "windows" { + ovftool = "ovftool.exe" + } + + if _, err := exec.LookPath(ovftool); err != nil { + err := fmt.Errorf("Error %s not found: %s", ovftool, 
err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Export the VM + outputPath := filepath.Join(c.VMName, c.VMName+"."+s.Format) + + if s.Format == "ova" { + os.MkdirAll(outputPath, 0755) + } + + args := []string{ + "--noSSLVerify=true", + "--skipManifestCheck", + "-tt=" + s.Format, + "vi://" + c.RemoteUser + ":" + url.QueryEscape(c.RemotePassword) + "@" + c.RemoteHost + "/" + c.VMName, + outputPath, + } + + ui.Say("Exporting virtual machine...") + ui.Message(fmt.Sprintf("Executing: %s %s", ovftool, strings.Join(args, " "))) + var out bytes.Buffer + cmd := exec.Command(ovftool, args...) + cmd.Stdout = &out + if err := cmd.Run(); err != nil { + err := fmt.Errorf("Error exporting virtual machine: %s\n%s\n", err, out.String()) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + ui.Message(fmt.Sprintf("%s", out.String())) + + state.Put("exportPath", outputPath) + + return multistep.ActionContinue +} + +func (s *StepExport) Cleanup(state multistep.StateBag) {} diff --git a/builder/vmware/iso/step_register.go b/builder/vmware/iso/step_register.go index 6710cbd74..f4c5d776d 100644 --- a/builder/vmware/iso/step_register.go +++ b/builder/vmware/iso/step_register.go @@ -2,6 +2,7 @@ package iso import ( "fmt" + "time" "github.com/mitchellh/multistep" vmwcommon "github.com/mitchellh/packer/builder/vmware/common" @@ -10,6 +11,7 @@ import ( type StepRegister struct { registeredPath string + Format string } func (s *StepRegister) Run(state multistep.StateBag) multistep.StepAction { @@ -41,12 +43,26 @@ func (s *StepRegister) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) if remoteDriver, ok := driver.(RemoteDriver); ok { - ui.Say("Unregistering virtual machine...") - if err := remoteDriver.Unregister(s.registeredPath); err != nil { - ui.Error(fmt.Sprintf("Error unregistering VM: %s", err)) + if s.Format == "" { + ui.Say("Unregistering virtual machine...") + if err := remoteDriver.Unregister(s.registeredPath); err != nil { + ui.Error(fmt.Sprintf("Error unregistering VM: %s", err)) + } + + s.registeredPath = "" + } else { + ui.Say("Destroying virtual machine...") + if err := remoteDriver.Destroy(); err != nil { + ui.Error(fmt.Sprintf("Error destroying VM: %s", err)) + } + // Wait for the machine to actually destroy + for { + exists, _ := remoteDriver.IsDestroyed() + if !exists { + break + } + time.Sleep(150 * time.Millisecond) + } } - - s.registeredPath = "" } - } diff --git a/post-processor/vsphere/post-processor.go b/post-processor/vsphere/post-processor.go index 39cd8c15b..21ae9490c 100644 --- a/post-processor/vsphere/post-processor.go +++ b/post-processor/vsphere/post-processor.go @@ -15,7 +15,8 @@ import ( ) var builtins = map[string]string{ - "mitchellh.vmware": "vmware", + "mitchellh.vmware": "vmware", + "mitchellh.vmware-esx": "vmware", } type Config struct { @@ -95,16 +96,16 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } - vmx := "" + source := "" for _, path := range artifact.Files() { - if strings.HasSuffix(path, ".vmx") { - vmx = path + if strings.HasSuffix(path, ".vmx") || strings.HasSuffix(path, ".ovf") || strings.HasSuffix(path, ".ova") { + source = path break } } - if vmx == "" { - return nil, false, fmt.Errorf("VMX file not found") + if source == "" { + return nil, false, fmt.Errorf("VMX, OVF or OVA file not found") } ovftool_uri := 
fmt.Sprintf("vi://%s:%s@%s/%s/host/%s", @@ -126,11 +127,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac fmt.Sprintf("--diskMode=%s", p.config.DiskMode), fmt.Sprintf("--network=%s", p.config.VMNetwork), fmt.Sprintf("--vmFolder=%s", p.config.VMFolder), - fmt.Sprintf("%s", vmx), + fmt.Sprintf("%s", source), fmt.Sprintf("%s", ovftool_uri), } - ui.Message(fmt.Sprintf("Uploading %s to vSphere", vmx)) + ui.Message(fmt.Sprintf("Uploading %s to vSphere", source)) var out bytes.Buffer log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " ")) cmd := exec.Command("ovftool", args...) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 2859cbfa3..8e851dfb9 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -398,6 +398,10 @@ modify as well: - `remote_password` - The SSH password for access to the remote machine. +- `format` (string) - Either "ovf", "ova" or "vmx", this specifies the output + format of the exported virtual machine. This defaults to "ovf". + Before using this option, you need to install `ovftool`. + ### Using a Floppy for Linux kickstart file or preseed Depending on your network configuration, it may be difficult to use packer's From 9d0c443ca25fcb58f77a08e4a0a2980da2f75f82 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 23 Oct 2015 16:50:14 -0700 Subject: [PATCH 879/956] builder/vmware-esxi: Add format validation and step_export tests --- builder/vmware/iso/builder.go | 7 ++++++ builder/vmware/iso/builder_test.go | 30 +++++++++++++++++++++++ builder/vmware/iso/step_export_test.go | 34 ++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 builder/vmware/iso/step_export_test.go diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 617e1579b..4c8e74810 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -202,6 +202,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } } + if b.config.Format != "" { + if !(b.config.Format == "ova" || b.config.Format == "ovf" || b.config.Format == "vmx") { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("format must be one of ova, ovf, or vmx")) + } + } + // Warnings if b.config.ISOChecksumType == "none" { warnings = append(warnings, diff --git a/builder/vmware/iso/builder_test.go b/builder/vmware/iso/builder_test.go index 13a9622f7..ecc012fe0 100644 --- a/builder/vmware/iso/builder_test.go +++ b/builder/vmware/iso/builder_test.go @@ -208,6 +208,36 @@ func TestBuilderPrepare_FloppyFiles(t *testing.T) { } } +func TestBuilderPrepare_Format(t *testing.T) { + var b Builder + config := testConfig() + + // Bad + config["format"] = "foobar" + warns, err := b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + + goodFormats := []string{"ova", "ovf", "vmx"} + + for _, format := range goodFormats { + // Good + config["format"] = format + b = Builder{} + warns, err = b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + } +} + func TestBuilderPrepare_HTTPPort(t *testing.T) { var b Builder config := testConfig() diff --git a/builder/vmware/iso/step_export_test.go b/builder/vmware/iso/step_export_test.go new file mode 100644 index 000000000..9ea0d64b9 --- /dev/null +++ 
b/builder/vmware/iso/step_export_test.go @@ -0,0 +1,34 @@ +package iso + +import ( + "github.com/mitchellh/multistep" + "testing" +) + +func TestStepExport_impl(t *testing.T) { + var _ multistep.Step = new(StepExport) +} + +func testStepExport_wrongtype_impl(t *testing.T, remoteType string) { + state := testState(t) + step := new(StepExport) + + var config Config + config.RemoteType = "foo" + state.Put("config", &config) + + if action := step.Run(state); action != multistep.ActionContinue { + t.Fatalf("bad action: %#v", action) + } + if _, ok := state.GetOk("error"); ok { + t.Fatal("should NOT have error") + } + + // Cleanup + step.Cleanup(state) +} + +func TestStepExport_wrongtype_impl(t *testing.T) { + testStepExport_wrongtype_impl(t, "foo") + testStepExport_wrongtype_impl(t, "") +} From 0d2fa223f2315ce80d2f841f25a67cd24971ab0b Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 23 Oct 2015 16:51:03 -0700 Subject: [PATCH 880/956] builder/vmware-esxi: hide password in ovftool command --- builder/vmware/iso/step_export.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/builder/vmware/iso/step_export.go b/builder/vmware/iso/step_export.go index 8c1ed1b93..ed4db4896 100644 --- a/builder/vmware/iso/step_export.go +++ b/builder/vmware/iso/step_export.go @@ -17,6 +17,20 @@ type StepExport struct { Format string } +func (s *StepExport) generateArgs(c *Config, outputPath string, hidePassword bool) []string { + password := url.QueryEscape(c.RemotePassword) + if hidePassword { + password = "****" + } + return []string{ + "--noSSLVerify=true", + "--skipManifestCheck", + "-tt=" + s.Format, + "vi://" + c.RemoteUser + ":" + password + "@" + c.RemoteHost + "/" + c.VMName, + outputPath, + } +} + func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { c := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) @@ -44,18 +58,10 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { os.MkdirAll(outputPath, 0755) } - args := []string{ - "--noSSLVerify=true", - "--skipManifestCheck", - "-tt=" + s.Format, - "vi://" + c.RemoteUser + ":" + url.QueryEscape(c.RemotePassword) + "@" + c.RemoteHost + "/" + c.VMName, - outputPath, - } - ui.Say("Exporting virtual machine...") - ui.Message(fmt.Sprintf("Executing: %s %s", ovftool, strings.Join(args, " "))) + ui.Message(fmt.Sprintf("Executing: %s %s", ovftool, strings.Join(s.generateArgs(c, outputPath, true), " "))) var out bytes.Buffer - cmd := exec.Command(ovftool, args...) + cmd := exec.Command(ovftool, s.generateArgs(c, outputPath, false)...) cmd.Stdout = &out if err := cmd.Run(); err != nil { err := fmt.Errorf("Error exporting virtual machine: %s\n%s\n", err, out.String()) From 0b6d303e6582d09b82dd2c099c4e9245a5f1aa1d Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Fri, 23 Oct 2015 17:39:48 -0700 Subject: [PATCH 881/956] Fixes Issue #1059 Adds size output to `ui.Message` as well as if the artifact failed to upload to atlas. 
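One detail of the password-hiding change above is worth spelling out: the vi:// locator is generated twice, so the real secret never reaches the UI or logs. A minimal sketch, with hypothetical credentials:

package main

import (
	"fmt"
	"net/url"
)

// locator mirrors generateArgs in step_export.go: build the vi:// source
// locator once with the real (URL-escaped) password for the actual ovftool
// run, and once with a placeholder for anything that gets printed.
func locator(user, password, host, vmName string, hidePassword bool) string {
	if hidePassword {
		password = "****"
	} else {
		password = url.QueryEscape(password)
	}
	return "vi://" + user + ":" + password + "@" + host + "/" + vmName
}

func main() {
	fmt.Println(locator("root", "hunter2", "esxi.example.com", "packer-test", true))  // safe to log
	fmt.Println(locator("root", "hunter2", "esxi.example.com", "packer-test", false)) // passed to ovftool
}
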
--- post-processor/atlas/post-processor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go index be445b238..029fcffe6 100644 --- a/post-processor/atlas/post-processor.go +++ b/post-processor/atlas/post-processor.go @@ -204,7 +204,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac opts.FileSize = r.Size } - ui.Message("Uploading artifact version...") + ui.Message(fmt.Sprintf("Uploading artifact (Size: %v)", opts.FileSize)) var av *atlas.ArtifactVersion doneCh := make(chan struct{}) errCh := make(chan error, 1) @@ -220,7 +220,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac select { case err := <-errCh: - return nil, false, fmt.Errorf("Error uploading: %s", err) + return nil, false, fmt.Errorf("Error uploading (Size: %v): %s", opts.FileSize, err) case <-doneCh: } From 5bfa6ce2b87ba2ea5db5b92fbaa9a66bf9330ab0 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Fri, 23 Oct 2015 18:33:51 -0700 Subject: [PATCH 882/956] post-processor/vagrant: add artifact id tests for AWS and DO --- post-processor/vagrant/aws_test.go | 20 +++++++++++++ post-processor/vagrant/digitalocean_test.go | 32 +++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/post-processor/vagrant/aws_test.go b/post-processor/vagrant/aws_test.go index b1d44b7e0..7754a48ec 100644 --- a/post-processor/vagrant/aws_test.go +++ b/post-processor/vagrant/aws_test.go @@ -1,7 +1,10 @@ package vagrant import ( + "strings" "testing" + + "github.com/mitchellh/packer/packer" ) func TestAWSProvider_impl(t *testing.T) { @@ -15,3 +18,20 @@ func TestAWSProvider_KeepInputArtifact(t *testing.T) { t.Fatal("should keep input artifact") } } + +func TestAWSProvider_ArtifactId(t *testing.T) { + p := new(AWSProvider) + ui := testUi() + artifact := &packer.MockArtifact{ + IdValue: "us-east-1:ami-1234", + } + + vagrantfile, _, err := p.Process(ui, artifact, "foo") + if err != nil { + t.Fatalf("should not have error: %s", err) + } + result := `aws.region_config "us-east-1", ami: "ami-1234"` + if strings.Index(vagrantfile, result) == -1 { + t.Fatalf("wrong substitution: %s", vagrantfile) + } +} diff --git a/post-processor/vagrant/digitalocean_test.go b/post-processor/vagrant/digitalocean_test.go index 09ca6fdf4..964877c75 100644 --- a/post-processor/vagrant/digitalocean_test.go +++ b/post-processor/vagrant/digitalocean_test.go @@ -1,9 +1,41 @@ package vagrant import ( + "strings" "testing" + + "github.com/mitchellh/packer/packer" ) func TestDigitalOceanProvider_impl(t *testing.T) { var _ Provider = new(DigitalOceanProvider) } + +func TestDigitalOceanProvider_KeepInputArtifact(t *testing.T) { + p := new(DigitalOceanProvider) + + if !p.KeepInputArtifact() { + t.Fatal("should keep input artifact") + } +} + +func TestDigitalOceanProvider_ArtifactId(t *testing.T) { + p := new(DigitalOceanProvider) + ui := testUi() + artifact := &packer.MockArtifact{ + IdValue: "San Francisco:42", + } + + vagrantfile, _, err := p.Process(ui, artifact, "foo") + if err != nil { + t.Fatalf("should not have error: %s", err) + } + image := `digital_ocean.image = "42"` + if strings.Index(vagrantfile, image) == -1 { + t.Fatalf("wrong image substitution: %s", vagrantfile) + } + region := `digital_ocean.region = "San Francisco"` + if strings.Index(vagrantfile, region) == -1 { + t.Fatalf("wrong region substitution: %s", vagrantfile) + } +} From 71ed8e4a38417f3b17f28e911085940b3d7659f4 Mon Sep 17 00:00:00 2001 
From: Mark Peek Date: Sun, 25 Oct 2015 12:28:06 -0700 Subject: [PATCH 883/956] Fix #2742: Include template line numbers on error --- command/build.go | 6 +-- template/parse.go | 51 +++++++++++++++++++-- template/parse_test.go | 20 ++++++++ template/test-fixtures/error-beginning.json | 1 + template/test-fixtures/error-end.json | 1 + template/test-fixtures/error-middle.json | 7 +++ 6 files changed, 76 insertions(+), 10 deletions(-) create mode 100644 template/test-fixtures/error-beginning.json create mode 100644 template/test-fixtures/error-end.json create mode 100644 template/test-fixtures/error-middle.json diff --git a/command/build.go b/command/build.go index 43345f6e1..acf568b1e 100644 --- a/command/build.go +++ b/command/build.go @@ -39,11 +39,7 @@ func (c BuildCommand) Run(args []string) int { // Parse the template var tpl *template.Template var err error - if args[0] == "-" { - tpl, err = template.Parse(os.Stdin) - } else { - tpl, err = template.ParseFile(args[0]) - } + tpl, err = template.ParseFile(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 diff --git a/template/parse.go b/template/parse.go index e5d7d3dc5..bb17b1bd8 100644 --- a/template/parse.go +++ b/template/parse.go @@ -1,10 +1,12 @@ package template import ( + "bufio" "bytes" "encoding/json" "fmt" "io" + "io/ioutil" "os" "path/filepath" "sort" @@ -309,17 +311,56 @@ func Parse(r io.Reader) (*Template, error) { return rawTpl.Template() } +// Find line number and position based on the offset +func findLinePos(f *os.File, offset int64) (int64, int64, string) { + scanner := bufio.NewScanner(f) + count := int64(0) + for scanner.Scan() { + count += 1 + scanLength := len(scanner.Text()) + 1 + if offset < int64(scanLength) { + return count, offset, scanner.Text() + } + offset = offset - int64(scanLength) + } + if err := scanner.Err(); err != nil { + return 0, 0, err.Error() + } + return 0, 0, "" +} + // ParseFile is the same as Parse but is a helper to automatically open // a file for parsing. 
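// When path is "-", stdin is copied to a temporary file first: the
// error-reporting path below has to Seek back to the start of the input
// so findLinePos can re-scan it, and os.Stdin cannot be rewound.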
func ParseFile(path string) (*Template, error) { - f, err := os.Open(path) - if err != nil { - return nil, err + var f *os.File + var err error + if path == "-" { + // Create a temp file for stdin in case of errors + f, err = ioutil.TempFile(os.TempDir(), "packer") + if err != nil { + return nil, err + } + defer os.Remove(f.Name()) + defer f.Close() + io.Copy(f, os.Stdin) + f.Seek(0, os.SEEK_SET) + } else { + f, err = os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() } - defer f.Close() - tpl, err := Parse(f) if err != nil { + syntaxErr, ok := err.(*json.SyntaxError) + if !ok { + return nil, err + } + // Rewind the file and get a better error + f.Seek(0, os.SEEK_SET) + line, pos, errorLine := findLinePos(f, syntaxErr.Offset) + err = fmt.Errorf("Error in line %d, char %d: %s\n%s", line, pos, syntaxErr, errorLine) return nil, err } diff --git a/template/parse_test.go b/template/parse_test.go index c2fd72742..008266642 100644 --- a/template/parse_test.go +++ b/template/parse_test.go @@ -350,3 +350,23 @@ func TestParse_contents(t *testing.T) { t.Fatalf("bad: %s\n\n%s", actual, expected) } } + +func TestParse_bad(t *testing.T) { + cases := []struct { + File string + Expected string + }{ + {"error-beginning.json", "line 1, char 1"}, + {"error-middle.json", "line 5, char 5"}, + {"error-end.json", "line 1, char 30"}, + } + for _, tc := range cases { + _, err := ParseFile(fixtureDir(tc.File)) + if err == nil { + t.Fatalf("expected error") + } + if !strings.Contains(err.Error(), tc.Expected) { + t.Fatalf("file: %s\nExpected: %s\n%s\n", tc.File, tc.Expected, err.Error()) + } + } +} diff --git a/template/test-fixtures/error-beginning.json b/template/test-fixtures/error-beginning.json new file mode 100644 index 000000000..eb9a1aa6f --- /dev/null +++ b/template/test-fixtures/error-beginning.json @@ -0,0 +1 @@ +*"builders": [ { "type":"test", }]} diff --git a/template/test-fixtures/error-end.json b/template/test-fixtures/error-end.json new file mode 100644 index 000000000..95755d9eb --- /dev/null +++ b/template/test-fixtures/error-end.json @@ -0,0 +1 @@ +{"builders":[{"type":"test"}]* diff --git a/template/test-fixtures/error-middle.json b/template/test-fixtures/error-middle.json new file mode 100644 index 000000000..65cda6ea3 --- /dev/null +++ b/template/test-fixtures/error-middle.json @@ -0,0 +1,7 @@ +{ + "builders": [ + { + "type":"test", + } + ] +} From 0acc75ae49e96bf43ad92bb1acba1248d7f3013e Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 25 Oct 2015 23:51:20 -0400 Subject: [PATCH 884/956] Use releases to release --- website/Gemfile.lock | 88 ++++++++++--------- website/config.rb | 12 +-- website/source/assets/images/fastly_logo.png | Bin 0 -> 182835 bytes website/source/downloads.html.erb | 39 ++++---- 4 files changed, 74 insertions(+), 65 deletions(-) create mode 100644 website/source/assets/images/fastly_logo.png diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 3895f5bb1..8cdac0cd6 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,12 +1,12 @@ GIT remote: git://github.com/hashicorp/middleman-hashicorp.git - revision: 76f0f284ad44cea0457484ea83467192f02daf87 + revision: f21146d03182b7236b85ee8bc430fd613125d5bb specs: - middleman-hashicorp (0.1.0) + middleman-hashicorp (0.2.0) bootstrap-sass (~> 3.3) builder (~> 3.2) less (~> 2.6) - middleman (~> 3.3) + middleman (~> 3.4) middleman-livereload (~> 3.4) middleman-minify-html (~> 3.4) middleman-syntax (~> 2.0) @@ -21,21 +21,25 @@ GIT GEM remote: https://rubygems.org/ specs: - activesupport 
(4.1.12) - i18n (~> 0.6, >= 0.6.9) + activesupport (4.2.4) + i18n (~> 0.7) json (~> 1.7, >= 1.7.7) minitest (~> 5.1) - thread_safe (~> 0.1) + thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - autoprefixer-rails (5.2.1) + autoprefixer-rails (6.0.3) execjs json bootstrap-sass (3.3.5.1) autoprefixer-rails (>= 5.0.0.1) sass (>= 3.3.0) builder (3.2.2) - celluloid (0.16.0) - timers (~> 4.0.0) + capybara (2.4.4) + mime-types (>= 1.16) + nokogiri (>= 1.3.3) + rack (>= 1.0.0) + rack-test (>= 0.5.4) + xpath (~> 2.0) chunky_png (1.3.4) coffee-script (2.4.1) coffee-script-source @@ -59,55 +63,53 @@ GEM eventmachine (>= 0.12.9) http_parser.rb (~> 0.6.0) erubis (2.7.0) - eventmachine (1.0.7) - execjs (2.5.2) + eventmachine (1.0.8) + execjs (2.6.0) ffi (1.9.10) git-version-bump (0.15.1) - haml (4.0.6) + haml (4.0.7) tilt hike (1.2.3) - hitimes (1.2.2) - hooks (0.4.0) - uber (~> 0.0.4) - htmlbeautifier (1.1.0) + hooks (0.4.1) + uber (~> 0.0.14) + htmlbeautifier (1.1.1) htmlcompressor (0.2.0) http_parser.rb (0.6.0) i18n (0.7.0) json (1.8.3) - kramdown (1.8.0) + kramdown (1.9.0) less (2.6.0) commonjs (~> 0.2.7) - libv8 (3.16.14.11) - listen (2.10.1) - celluloid (~> 0.16.0) + libv8 (3.16.14.13) + listen (3.0.3) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.12) + middleman (3.4.0) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.12) + middleman-core (= 3.4.0) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-breadcrumbs (0.1.0) + middleman-breadcrumbs (0.2.0) middleman (>= 3.3.5) - middleman-core (3.3.12) - activesupport (~> 4.1.0) + middleman-core (3.4.0) + activesupport (~> 4.1) bundler (~> 1.1) + capybara (~> 2.4.4) erubis hooks (~> 0.3) i18n (~> 0.7.0) - listen (>= 2.7.9, < 3.0) + listen (~> 3.0.3) padrino-helpers (~> 0.12.3) rack (>= 1.4.5, < 2.0) - rack-test (~> 0.6.2) thor (>= 0.15.2, < 2.0) tilt (~> 1.4.1, < 2.0) - middleman-livereload (3.4.2) + middleman-livereload (3.4.3) em-websocket (~> 0.5.1) middleman-core (>= 3.3) rack-livereload (~> 0.3.15) @@ -122,8 +124,12 @@ GEM middleman-syntax (2.0.0) middleman-core (~> 3.2) rouge (~> 1.0) - minitest (5.7.0) + mime-types (2.6.2) + mini_portile (0.6.2) + minitest (5.8.1) multi_json (1.11.2) + nokogiri (1.6.6.2) + mini_portile (~> 0.6.0) padrino-helpers (0.12.5) i18n (~> 0.6, >= 0.6.7) padrino-support (= 0.12.5) @@ -131,7 +137,7 @@ GEM padrino-support (0.12.5) activesupport (>= 3.1) rack (1.6.4) - rack-contrib (1.3.0) + rack-contrib (1.4.0) git-version-bump (~> 0.15) rack (~> 1.4) rack-livereload (0.3.16) @@ -139,16 +145,16 @@ GEM rack-protection (1.5.3) rack rack-rewrite (1.5.1) - rack-ssl-enforcer (0.2.8) + rack-ssl-enforcer (0.2.9) rack-test (0.6.3) rack (>= 1.0) - rb-fsevent (0.9.5) + rb-fsevent (0.9.6) rb-inotify (0.9.5) ffi (>= 0.5.0) - redcarpet (3.3.2) + redcarpet (3.3.3) ref (2.0.0) - rouge (1.9.1) - sass (3.4.16) + rouge (1.10.1) + sass (3.4.19) sprockets (2.12.4) hike (~> 1.2) multi_json (~> 1.0) @@ -162,21 +168,21 @@ GEM therubyracer (0.12.2) libv8 (~> 3.16.14.0) ref - thin (1.6.3) + thin (1.6.4) daemons (~> 1.0, >= 1.0.9) - eventmachine (~> 1.0) + eventmachine (~> 1.0, >= 1.0.4) rack (~> 1.0) thor (0.19.1) thread_safe (0.3.5) tilt (1.4.1) - timers (4.0.1) - hitimes tzinfo (1.2.2) thread_safe (~> 0.1) - uber (0.0.13) - uglifier (2.7.1) + uber (0.0.15) + uglifier (2.7.2) execjs (>= 0.3.0) json (>= 1.8.0) + xpath (2.0.0) + nokogiri (~> 1.3) PLATFORMS ruby diff --git a/website/config.rb 
b/website/config.rb
index 80fc3680b..8bd18c385 100644
--- a/website/config.rb
+++ b/website/config.rb
@@ -1,15 +1,9 @@
-#-------------------------------------------------------------------------
-# Configure Middleman
-#-------------------------------------------------------------------------
-
 set :base_url, "https://www.packer.io/"
 
 activate :breadcrumbs
 activate :hashicorp do |h|
-  h.version = ENV["PACKER_VERSION"]
-  h.bintray_enabled = ENV["BINTRAY_ENABLED"]
-  h.bintray_repo = "mitchellh/packer"
-  h.bintray_user = "mitchellh"
-  h.bintray_key = ENV["BINTRAY_API_KEY"]
+  h.name = "packer"
+  h.version = "0.8.6"
+  h.github_slug = "mitchellh/packer"
 end
diff --git a/website/source/assets/images/fastly_logo.png b/website/source/assets/images/fastly_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..0c4619e964742b43dad160839a7e4f4433b53790
GIT binary patch
literal 182835
[182835 bytes of base85-encoded PNG data omitted]
z#|RK2FEU3gWKwi)M70J=Z1ivyr{2$YrOo``+Bc|F_`FK7ADSK66uF3Z!_8rDXRYC< zI)P`>)}XFW;G2d=eB8w^#n8c8F0KN70ilyC0>U(Wtn^cGV>0YFdOU}>b`NB0&L6r2 zU-wLwAn{D0_@axi%dW7O=euXWts3dNNng*%LTrE}OkfT#}>a;k*IrNUg0|;wye<^~d(k^2% zfH0{mn?YT4#F#2ju9tc7!(bUUX9QtJ-vgtR$+bH9k<l1p%z6UxoHL-@xN6*&RkWRe^lnZ zx}5x-GosLF8NN*Im(D9LhSHsiMNJ-gR#!B9G}<^#q=xSeD{t3U2=dN}yRG^|G@{gV zEdn<5j?K%^#QqbK(wiqW;OX^5R+HZ3zM>bG4)P(E@Qi!M^>U%J&p<4(-um2;skx#x zQ2(4wE1y#^*wr4>S)^jHZ8W{EqQ=B*u?}w z;22LuzYDeN0eCEdp$o7ay^9_vhZx zw$Yb2&aa!2s$5@pBV`W@T{LYEgLX_}u^Y=&N9>6X#iol{N1Y4ea?=FFdX~O$b6ypx zl*R9CddRaS%}h0rtg$WyJNQ*~RDBf`=-PttioBO3Nu_7Kdn;32rocs!BkXXT-F+*l z0n<59B`B=?>CvII0`suyr$)~2?+B*%)LUN)scM$23bh_-JN(1{u=z0wwV8L~-EJz2 zGlHJ%L(*%xGz1wrT0%{+A*gIFkuJvKu13BGsbh8z8~uHpCMZ(QuY&bhfCE%&Ez6%) z_`C`3HIbKTvg%2>(E7jPjC+>O`7uLlg}(Xqxih_D{5DsI4p0L2O7vz8*PuoBmFA{} zDAzxJ(TxkU3rvG0k{wWe*GH*=sF;Tjde(zc#~4Qi-#iuyBO@J1S$_TAayhKw`nNf~ zj*h|)Vb!PIc%DdqY>n4DU7s2WbkoS6JT=?TR_a&EEvs1~`@HqHe~H~Nm7rePvQQ&+ zXy{5Def%~Xb?jpHWO`S5oph>3oL$0;Ze&7;L3yTJ-*e*UX!(gLtaQbY+)Y-t|I#?= zySF=u`%E)D`X^jW^Yfk@2#}KNkpATRGBot(i-dl=FK0^SwMAqe`G&l;o7ocLYS9U` z6By@@tVhu1*O?Bm2Mwf4bFbBi_Pl)MIr&74l9@tt=Igz(6$=>H3t#hnVwx|gRw(3j zKbn^Z@AH7_;Qs29`pl0}E>q|TL7azRUF%|mCr3#XQ}JkL z{(CoK6klenpXWMbDfGD@)$&q@s~eY+tkVOV$MA+7Q+bFD^rhvd$&{V>(~64yI(SZm zQJrM_2r(x^R#9P#{q{gYw86?iUZ z<^?KY0oUA-bTaMW*~8=}-+~Lj_(d0XPmBgdk9cj=+a>#ipM}jGQYQ2d1iYstf}UH! zp;C-y#}76Jn;;d--{p4xL!4RgAzkF*sFYKeNN56oA4?I0DBKN@_{Y+3zwS6tauw_$z*G|_k76Eaev)a4G4kwpDc3}=$?`w`*q zph5syqoSJw6Zs-{{QYtiI9HL#dI%`#23se2Uo?2ZDOdl(uh(jf&8pcA9(JwZ%OBB@qFbTVDCG4hylnYPTjYKSJiX(hKOXYXZ^-FZkJ zQ&H&}0-7R3P@(esClYpmtQM1X!3^-Yyw!p*IWaPM!MCrE6}u0zDTEhYyCjLnaa*+6 z_3A)y>}v=rjw$gIgy4Se-NU|b{_7@3nIE^-hzZT|oe<-rm!Sw_A$y&h5SD0mL%BX2 z-!k>spLI{W4a@=~x~`Ht@~vGG&iq5t#3cx4*sesq92q@G*%(_$J@JZyO*N;YEn&e# z?V7$5H+TPVq~NP+Y>@GVvK=oDu4f%S87amXg|28jva7%aR`KGD^dg`|K9?SN8%!xF z@G{y6onA=1ko&h%e zehT=*^R;vj{4fH&|qGC+zeV|>&fvhH?Jy5hY7gU58|*$&|1X~wqm{S>fG-(KAc}D8 z64I$2Mt?0C!=R0(cCf9jdx&bcjHNFvnRD0=-fX?_@1_iJM@8HG{RQ4(S44JRbA$SX zX3m)Yu_>QE++>tv69XoJdFr-dh~en&)^GD(SY7WCgygY>bO}7=|*BOGT(bEEOEO9g{i(Rt4F%F^Zo&~ zl&D?sTXpy0yR&52*p+O%LUCW=9}mtLe&?;grFWsMw5&+?CX;3C77`2M!s zs>)=+Ccw%70T`E(kQGS>Uc?VV?hZ~*Oh*CL!0Lp0mD$AE1zs$hY$PZ}uk(6aV+cX^ z7x5bTnoTugBAW#;N?wZnxM~exI`&g*S@(WDF$t&W=>`W;po3ABHKKW~VY9gd5JAB| zFms|w5mIsTH4fG}q=)XyZnrMZSRDfti*gJZ7kHI&%=uR2`&~z|vK1v0x@A9PYBZv4 z;Cy@hsBh%~2i3pI7bk`g{qVa{2_T6fnVUZhZrltsR%s@Jgqn$l;#N8_KOaHJ)&?O> z@wf6b#mV{DuhfFPb&<^CTkMPnN)dK7^}hl7V!34R;7g$FqtQVtZMK@_64c%1|4}2ojQ0lsm^j2Y@ZicnJr@+(VgTB+} zZO!j9#jPj}v|mAEp@r6jV-lB(SNFgVDJf>{#yU1iUI8a1D;c;rAr>&?fO@(x`UuG2 zVWx4B?ih+RN{?QG5Bo@r9A+%_#&m;%6(A1ut8&|&dXG<@*M8pNk*${|81d=iCNVa9 z-6@~dLru-$z6x0LnU30MNe>P(!$0Qxdg*V#kF-11Ohl24 zpno=nOv^Wpm@~zN$MqiNo!t9egt0CtpKrjl(a~0&*#a}fv21>LuOW( zy0?4alhjB4-U@H%kqP)Den$oGNzZk}`wi$Lmxp#Vc~3|R#Y3BH7e`*Vp93mRBx;ei z#s_j}Ae}MIxlo}-!z=!D0iH72;G1V{ua>#HL?lkFmId$tk^w-u*}VuR_9AEkIg4H@lDyOH(35c%JCtYk41(BC3)gV zhB(~c6gL*V?Hv%pRHH`Cn~!m^VhxWGnsYGT-URXjttnn}5K87fKyT$n}7^OLHc-%hH@%4|V zskOR12y=pMZOo1VI@&4u$&!Rh z+}*Kt;8lGH0k8~;L zmNbd$EQxHrV)R-MNcdwD)`Wx|Cel}q^{BrkMQIu1}6TyfMm}-mb7E@=@Usom< zi+#U7#5MU{a9AVMH?R~mc*B{>^+ir8DE&I^CbbsxIkA>?s?OtwzxQ^?vMU0v*%&DR zZTsM;av#4y?HcgX^dbvTAotsZ2zt>>445AAwZvlZ2M)ep|1(RfCxE3hczUj(SgfQ^ z{3HMHHykc&pWglE z{Gl?DrBjBp-<$IIr~)M*2uR2eKM51VN{+ zViJ z+X>i=1;|RLLg+`_K!Fp>yrxpsMHitlh~R>VXWj+y!FX8%i5*ll=u;+T$?N_<2nLdw zXYWgKi@0xwi135*}p`ZHCa_>Rfhw9>nf6;&bi~kjD?}GT3bc68ZuEUG@7_um`o%f=Cw<7iV z@T3phSyYEj)Fn2-*n&XkrUKN`G{5B6#}*7{LORz9^~li%#69AL@w5Rx?5hf?d5CAkpeJnFgGFTHtR(;$4L6t$y(n)QhH}yTpvW!s8Fmk);;5oM4Y~eb 
z4kpZx4=H^QY)uSK!SPAi6;^eysj(S91Yl4v?D|*U2!_K2JG&Q*F+8I6(f9FuJ*l*3H-kv$?p9j_h=E(A2}skEfMX0Yo(D;enny zk&D25A=U*yHj3Ao=4T6QFf%bYFxJ(NIu4pcQLhrTx?E1B20{ST_aBiT8de4qaiF9< zbZOJQ1R>`*)gB9P!4vSNywuOF{>gJfRY$xc;7UrupPp>58AtS1yn|M86g6QF9|^|L2M94Gwy^-fMvk?-Mb6%5CUA;I?m9kUdn zf+CyOty-z(gGry9SkewoT45mRQ{dpY;uL33^@xnQg#A##bro>+C|!V=5!Gpo=abmS zB9@80_+ zM-KSgt9Kfu-M31_sEEzwdcgHz-7?%*RCOuY$F8B{6{(mHV`>u=0}a5i<sb1{^{qk z53}ZeLUn<3vJ9^aNL8n(a|!0&d0NTY_!3K(FAC8i;@6Lku&P_O&_`CzL4HQ)j9Haq z*`1Ju^QX43j5X4!)tP?R@%`hgXM>>2Bnv#nzxJBr81hghtPPO8t9@~-;(8tg>&q*= zr?aKsO>aJ4yx48!iA@4n3+YG9>ouZRqX3H-)HQhy#*(xDJL@IQf(=$3#2+?ua5NzA zwSgBw1UU$pTbD3!9aDLj0W0n7d zrr2R2(&4VyrYKg#<@WbBD~uh^zT593u>~n1GrRmcTg|BuT?5dsgqrnR>c~K6r&UMr z?fuDa;bnfDqk*4nR&eoZurl;W@i2^xd@m2K0(S?9AhZOt`zWcFJB{b_2Z*Fe*IF?1 z%NK5g^S^BuMsEgD=vnr!M|+xzOJCg4Oj7Kz8Ta%W>>ZKg=fEOJJENRfAlV)GC*ck6 zbXy@t|A2Zb8c5VLQ-{DkQvwV2%upTLBUU`Fpnh>A>u3rGV~AW{IMiY}s}uf08@Ngw z<0p9xJjgmaqflDBVPtsDBT!4K>cRk@& zVEPBrYDPh}ZQ0O!7U-DVz-kE=NXr907DMYE2{hGUrG2gAAB55OK(;pJ9Aj1r$;Y$5eC0#9r0BYs zbm|idT|ISlQ2t{Zhqu_0fKR9W9$#RFsTr~CCF5rIV>t5I_ZOhKAA*f%oLHBJ$_Kgl z(K@)y6|%VA=prYqh}E`)<_xfbYkBpk`i0y3FYF7czqR9qn;qW?pn>ZYk;O}#w!UWW zx11_e67`q$!mh230-S>uZTou>m^E1uXh1p`k^jI!LBB`v8NeS0xM`vgzihvI>VeU- zl9t={S)&8h!{dIx%V)yD6D%>kU%P6@)89}26I>o&KG@?X`T71)0n08D5$Pt`db}yR z?IBL)0f73-iA!08YQ$W5B{5nn;0qXC*KjZGGBwIeH-89u?MUxvMYfqSr|a699YC(P z1v=Fy3nJzY={?J9{@KuTuE^%Ld5z!GEmdreA|QNK-?CmN(U4sci&OivYKJDY|_H4l4&$`hiQHqD{&l~P~I!Rhh8LyA`y2aQWe7u?XHz7_U3p?bSnKyzl)`_VIoBkNZUw)G({r5G(v+*PhL zmo9{!FGDzH(%ZCFj}EkDGi09=q;e=#!;1)I<$f+|=Sg5wwwQn3fpXK0KDETNXGg zRLQ(qd#HT7XY6Z?g~CEmo_fgJxcy?Syxu>Sp>Q!DP2wpC@Hu= zjv5yk_%E`Nr{}G%?zYx!tAD8EoSrroSmB{PXYj!D))Jd5Zp%6W(8cMIBX&3`om(#M@ZyM12r!P1}pJQ&GVh`*l(=AucFuiR=#Uu5aeS zBU+p43isDr{`l?NVe}wx@h7Pn<}}rbefa5)i`xq>yS?RGKq0%7s4o3g`KV!^9El4J z9S+kHSL5V6Ns(lzF*9Sb%+;wQrC>3GL5lS? 
zba4$iXl36Ut?4w+JLfTMJZ7*rXA6#hGVebDuM%Un*`M{7y<$RoHq%Z}Le0J&E307A|58X#HeNXwp7U%EA7j(s*6{##>ivR*xvQOT|77Zb*ZZ?`DlGc<_39I zqu^-iCaBj3Fi<>7D3w@XHc^YmN{;{D+z|T3d+v7B`(57V)7HIeGH*lO#wWLWvQRil zw#$1Pr^{>j%r75%H}G6+c>}?Qza;u9ms7%p9ZHz;>)6XpOMq>K4i?Zd44bN{9ru3} zrQdt^v)DDEdaMLAd;Wwub=N(%42F|yuEkRihi?yAn+=qB#vJ;D0?$cfs!BD zEf;Iy^do&0zZmvQ>{fX3z#G$FLKlA&&33NaanW^Ugg(Vk&F9n*K(YCHBPCD2*GlUT z5yIV82r4XP8{4fU>Ri@~4r)9hTCo`aeE^*c|NK)mQPIaB(Q|pFZucthZsqMl4-Y!8 zFrU3MZYO;dsQG8PQooz?YW~4|F8Nw~v>`{>`k~lYPUxS=7b?ZH zbOBf*0TymdJ(C3Nden_@-|(@x&WXZS3l199vb zQ)2mtXG+j_(xCOPvIPGqRE9I^3#dkFk`2C#KSdR#)xB-G9kyR7WztHjta`HF$Wv0F z|L4rYzLkW0xrmi)YV^JO*SpEO+=zN_z}>LJb?3EAJRNkJ^sP(5%AE)(WW#u%wWqJq zN!`@8U9itASZ>Lr!Rmd-$0qAX+?@F;wX2-J?2ehn#x4+sPiZaOwxRm3DIR76C2zuw zV=Z!{(2ZaP$J#7)4IRvs=sHQuvSfuzuUM2l8EzXNcE4cbFIp)N$6`lJf8O2Cn@C2N zjLWa8zbZIJ-u~Obft7q)E1SBDyWtd06A}wmH6Q5tcIbeR?K*mNuJk709F?{J?|2yxfJ8rG8ZzWa}`4KLyKHeO*(-*=8`(D-ycl0xO%eQrcXK4B0dd|FP;`g%eu`Bqmh(1VO{cFzb(8>05Se=vI!{`e zSNrn-(HBM+PbDyge?F$}>roq*A2@OPT~oghR51ulh5a+>jP2=jB9w5Kk`B9iE@(D% z!7KjIfw9+Qxbv4ZbgFLbV=>8#g*Mz?GmgR%E}Lr~3NJ*p8CDg@YrEY)J=9phN~izogWu{}{heQv<#cPUU3KQq^z&w&7qYmUiudh1 z=byBFIrFrjhEHj-8#`Tq&|+*h_^%|;Z#;c^Xd(A?5Nx{7K>!T5@dTbyBUI?=LyKsl zNu7*Bugc*~vYQwB2uan&Q3YIOJTj-Xl{{0zS!urHI_LhcDftst=FoV#51AXZ*0{kT zQ4eESF@Q>&&$b?-3X0(PW_u<5bT^h`xl#gs+9n-#QVph;<72*OeL8lsaKH0)4?Xoo zF4^;QNBq0ooVx0-hqSt%)tTOJe>kn^J6sqZGG-p^ z_Oe)wMph}YIdsJwIT)`l)nER(Zy|Zf_&)qr)O&G9xNa5mtJva0I|U?>cT|5` zWV+C2t9ODh)1XzuN3^kE0j-Q&US|?ql}C=Q^=YHoKKyLlEht0sgt!=v9s^DRyB-VT zT7l(Sfd+WE?6^ydsPupf!Zexu*b*`v^NadtqlZK5FQ!*uWsxD&42Wi2wh~J5E*bK& zUA`<0Z?-Y@L#Oo;_nlJS*cdw{#9<##7^kk_f7B-IgYqqBma;tPf@8c9FZ?-(Zl4F^ zv{^QFIuo^7-YL32T$~8d{$jh|SrG+i)3#(*wP*RA;cky>#ftO#+gKCV5|Tz2{3lGL zDvx+)KA~7{#Z6uXVe;NVz>H0)uVGDG0#8c&a;h$lRZZEQk7*g3!0Z=dOJtd!=^r;j z-4q27FNCFX?Vb8b<3g_gLH{sO48~4gn`ec@Y4F(_G$=PBCsrir9}Bs|Gn6O#R>*On z;G<>=MKDp0oVfTOluro?E=rTCWBY`Ug&PF-?Q)D2>I7<6!04NH{7 zw>?mMWamczYrQ@4r<_l}6N_`h-jVqLm!<#MiSy%Eqx9FZQxbA1F9AAW;Y0$9-tyUY zoT6ZO9i!h^p76Hy5{vzF4N8VdOVe~N&xW!7l4*rFj?>W>TqQl|KBPkPOz2>-VVNAsQn`gRdg^i*oEbt~)pAc$OreW~ z+gc}|fFWUKpSD(E;Z3E#Q&uDA@~?ZNmlX1|gqfNfOy0IrX2HVA<4DiaAx|LjNm;Fo zts$C_OOxjJdJ{z-)#x)h=OLp__{Q74Dqt4?c#_D@e%sCc)4Kj z#}F2sU3`_!Km(;_BG>vZ?d8lx4($F|0dGU@(~xKNpG(#Z4e3bj#ghUfx4R6b2fw=Ir=0esJU6B|>wI#(|z%{8=dkR72gyT9D}`#j~zn@@6#*c)V#fbl3~y+rN% zmXs2VIdIwTh+7B;%PR&2IicgfJY_|V1f zEa7C#WPi}yAobv1(@blU!R_dsJLt%LcsAG-7)vtq)v>UqTXy4gaj^3_g9Ups91@N! 
zQYsVR>gWg>@!j;!G+0jU)(-WWZm@2!;UOJKL%KK-GGVKd{~GExc3zOTv}NQLZT4%MRMj%|Mp+80%yP6 zVNv=YVRsW<0TdR4B9p@eI#loEA~oS4`t<0}zSxdOtD(E3KK)p#&V;&OOm=L2T{d%B zaY}lUnve5~sZSSPkF`X<9oiMCr^nSrCC!h)XIuT_A%ByIE$%jlMU+l=uUabDiSp-+ zodo_)Tzd*G8mafBt5vWTYnDw<-Hdvl{y?|V!qz8NPLL6_-CvY>maFjD)?|!_%*WW* z8fy3mz-4OWu!r}Q&@z#`*hz}frK1O2A^%CalKWE<^^j#>lk;E+#`sU%VeKyTSo{BV zj=%{31>6tVAChW_-CE4slzn5;RWYGGJfG9)eIrD^@$-`+LHoynG~5iXvG%21k8ee{ zum2&%B7mW{pBQ46QmPpoTTj0qo|_YIyXRPba9BCaC@F7MtEfQll!D{qSarkQXYEq> z7y;z`ar?Ql%LDiO{Y}TyP0YY1Z)ES_al?57`?o9X&DesJs2x z{+Nt|FQ%6Dc2E-3itX1H-jZC9Nv1?;c0aP8@y6Pw{3C=ighidv2LC}nPZWG<=Mh#; zU}8HO-20nu$sa1|E^|`fTsg*(yzS5q-kCcKDZ&I4pxytG?NbvUI`;T)LBb{X7AL(_ ztNxUa!E$uzAw=eoalM)AH3wmtWoG<~X7DGkozz21jZ2-p6r-;9s&rgQ-!EJH^FI6B zT*j^w%oH9*HR)EA1qDwmS~vUBjl3)qrlOzsKfpD}GIImTwJ)hNZy^D|usq$f>ZCQc z%KZ&c`gKlK5#vF~SfkyfPZv{$@qko8pLMdS`pxKGQF1UkM6!G5?=0jOYP=c>gMTj*5@W_{QLr$^}+Mte;lgEc)Mz%^Tk@UX`BMuvn&r#pJn$%2ly-Re~35 zl7!pbY92m5WBhcy?A(lZ{H5iZURS#eHD|9%lSQ&#sCznT*0=Bs9#+5(}Q-##tarO5me%S$P{ zGU?HXk9o%XW<|G04DE2zW*gN)L;mfh)dzrLSwxbuhPv;$+ zqD?IxH~vOh8@|7O6n~ke`JVTGKum_7J-%(m+KOqfQfRzd_2pR_+<8NyRTW;l1%m^j6D4)F-`T=TI5bMw9b&gy zT#Z~5n5Rz}rKVVwpZ;u?m0J9>Av40K<6K&km1~s4xNFC&+tRDLTnYAKaZ8=Q237p6 zm(jyxZt44PW?xe^s{zc?l_&1{3qE8p?w@rE(iB_I_v$g$es;{X75xx5P|AOaFYyR391kHNlyI#{9kJ zte9#LL35!-B*OXJW3S5t!tRIEHtGGYRf_6<%p%%ogS|fUdE+XnK4EpNSW#xmx&ppXX^F(W|Xs94=I7ro5Nk(B|K5v`sp^bH8)rH6=lLRwXHNUY$*; zze<|h*F1yT3^^K(WKGrW;-)z{bR_SB#?8+AgsuiZg?$Lcr95SJ8+Wo$0kr-K9jnW6)6Ia8iHKE#4jYjQlUXsU3M3FKKGyOGfZ1{AGSIV0_>w~?Pz zxYw4yI&nMYAw`6-GwH7AVi#7-uamS$&BuV9O5E-p>)-kq-u)J}bc|^l4?-kYX zupHqfx~Csv7LBB+z`LM7X+O}UK9ZRa}Bw7Y1Jqu#>}6-(b9$LT97c9h*E*RQ#l`Yr>D(Eo3RVj$ZbL zT=gxbwHv}Ty}Tv=6jBqdL|!N@XLC7@6aB`n6KFR=yQMnj*05tr4U6u1LkgrN0F~Mz(n)AxBF2 z*^h=o&DN>GyQi!_Jp@XjsapclI-9QLRk)r!UDAIjEH%=f$zq~KPMrs)^g_k@FgRjM z4*I%6sZ^8aDv6S~BYpQ*O7?Y$h+s}#vv87#L*R`xH(6f%;^cN1q%Jcp9#UN=d*f!_9VwO#(h z1|a*I^T#&pT*>dI=oAho>}RqS#C+6s6vqNdF@qPg{Zf@u7bv`Ybe;fMFjYF$N18p+ zp|PYNy~faDB>~fHHm3RN;#m3YnCZHv$)fi7LrfX-+bzpu`O zq{v^;z9y%7tKFi+hbLf>Q?E&Fcmu}mYAuM|Djt^Ln**~s{wv=UbFP{;TlMc`+A$3I zMW0Mo%`c87t_Z$5l*K+fGr=SnsVr7Pdhx2QaCBFxjy29T#-TW-Z)Z-a){w>@#$Y0c z!D3VFy1zL8Rj7%w+Mg4eN}_ek2$)?FpAjgLVby3NHc0FTN(T@9FpEIfcKFX34wGV(H76hc8y3gST_r2rsb@S=qtb|HIdp$3wmS|G&o?#UzCgtwf5- zGBTJpCB#j#WlLq>WnZQRMW`gQyJXE)Bv~W0VM-w}wiqHyV~zN|-eYt>-~0Laoj>j) z*KywGyq4!{Ij?h$hCce7(H1lRI^nu{k7iCG?N*~-^N93a`jAa9B%t5AzJKl?s5;mY zsiU}wk#wQp7Hk$m_#}3bw(n4Yngnln$<&c4LVegMBmM$^kInpw6bUy@5)=)^kwRe;0iDgA6@`4v#a}$dp6~%X z1|dvPTnOOvQ@RVD_9QFHtN^k>Wl#rQb$8u?9QVBj*M{zri$lJe^` z>o%6|o8%Cp@7J)v0-=GgJN#~Z4O8*97-o3A!W_jt*cBm8agf^#X47i8eJ6FO}V00lPjT$lQf{6YMbm`nu2AbRQJKWnFP zNDea?4zkLx5?8sXTst8!sOajm?|1oL&aJ}q;W(2Xf1j6XtbW&3?Qw_ZOrtY(eS1m& zGuRp5jnn>M;UPss(q>#vOE48YQ`I{U)gFG9%@ZQZh--(NwBU{XYb9IF7CP>Pf1WZQ z&e@CoXZ#G*?NP6O4u=it&9z!b|1;$R;922ap&}*} zyXvV7fUdG=I)0r1XL!y{`nKGBFYfLiG8Mix;?v%bjHF(RYwhVrub=Vywlj_~zAml$ zZ*(e2+Xl=Q;J}^7Cs;AyO9I`c@^i1waB`NQ)B7V*-Yo z<`Wi78aOboPtuW@1wO*VGsez+W+n5P#3l_legx)Mo)4;3hw}%TyYHi7M&L$G$kLV1 zDIlPZYvg*`zQ5ommeUPLQu3LXIjVe7|0sLPp=KbI=NYH_=sSCgjt~@r0HFibNYr>h zxZ(p=j>K)w<%moeP~oYDkr29EaGXO;+@#;7uO+@2lj3pV1ie1 zmQ$wuyILf~AQc0zZO9HF42sU%88}UO!g7i)w5^qA*hmMC!Xq(6ZsCjy+Dd2x3aZ;C z6|ZN6D@v;4;G$ndk&as$s2{Tt>^aBORtAH?tjFJ#jBPlbA4@kpWgb+d%Z9;rayNS) zzHCA7`OqInv&1TiHJ{bY$%hsQQ&@R*R z&9{BGsR@f+U9(iiZ*+4(c$a$N!1W`CXoR3!e62`u1(ns~G2YywFO@|h&_ zcYx5KlegzvzpWP-)DYCgygp24D^IW-wg$IWQaNy9D-C1Nq1psECXs20O&Sm3rh!4J z<7;Tjn<~wE#BKvrlfk`x9!17-B(M}E12;O_3K6#zi7P;@=>9eT;R=ph^ABP!ZkW!? 
zUWT&2{(*DpPvBfmlEAry#6G`7tCF>Z!L%oSuU7{_a=$RsA0uwCkF2#0)iAJX$I$tq zt@}7g-)o5x3ya#^BzCCr7CVMQ13??4kJu+n==)}UDy@fi-r2}9iI>CR85XhJLg&gM zh|ew+l)iNPCrj(W11sjFNZe-K4;-Pp@}VF|ha~KGVhG`{O2pLmBOFoLS08w{j?^4~ z{s7-h7<+m+ZHgSs^T#jXyd}4}COR-mxVr7+GQz-&{7wR27*8=DO1}qzsO44$boTtH z>*94Wa|lafDB-t^4}PHgd~Kf*F0Ap`gbBYk!$o>FkH8A)JaIRbc+%q~p!#h?b9~m3t3wKqg5T7YmmXK%YIDEKJ%0( zG(XHNp-2378{R=A_(VA`hG0xrBiHj_sHIqX`UCz1Y~3!5<*Ien+z7r?za4HvcD;J! z20Cx)P7ned86$lvch1H9!_OObV+U?0Lbl49F#jA}#n(lndZK5m5e53tlgIQwKh5{x zt^4QP+n^XQkALSTBY8NuM%u>Cg_4-0H?-JM1kY&XLk7?ga`<=8yTo*s4jA^z^rXYo zqCImx$yg5O_^1C7$V{G;SlpyimigPTr|yqp{eB__qGlPFZ(>YKIT6FOZo(@M2r&&! zjMvK(>+~!P^1NH|1LpdlEed*U`DS`O?m@=JD-F{vN!N_QgWG};T9LrCRP%=m5CylN4V)MWI2CI z5RfbAzOPfx9x?&01IN^5vz05Vfpa`Bz{-lS5T|ds zY=+6EkN@F~ z={-J1%CW>$OlmFz_Bgy~*=iOS@PA;380fvG?>Z=Q=lW?%b3VLxjF67dy~qtNU=%Q| z1;!8fh&=B;YYl^g9#;GHWb*~Nncr=GY+qkLRIYD5vM;?h_k(F{1@%v^4(S3Wh~GZ! z;Yoj~1y;sy&0V*%@d_6+A^laag!=`k{a4{Sg08v!0$`kA6B4%1;%Lq-kJxl@b}zJ^ z%;T#60D3*HD?jLNvCEh!Nl(F7ML>|gHYBn7IT~IpXi0pUkEMnANcYwYE{~kRO8kus zJ~+Gk@&+`TBl0XKvxw(T&h+I1Mu*r{PuPN3^i%qWFX*Q447zngVBS5vKJOii&;1A~ zLA6S^2NMpJ8zifu=sMZ`rdMNHN~EX1etTDg8$jq&z2_6#s&-#17)PWR6^ zAne}MVA&2;h7hcfxU$dhA%_JHkY#4PBpRDkuZ; zThfCbOKuJj$dv3!yfT6J@qt`J?s672mmQJEr>`LCta^hs}Vn=qXTeqLH@B_*EB06t+8#VMxkWOLZ28ivd z)yQHPGg#F3!nP%8_{niiieemOjo+T*9LLd(z-y7ycS1E(s^5jz<_>{_Rj6U><<8Y; z!)jJ${lv;v*63UO?&Y$2BX0k6f7p*ZvJt_nKyo*!hc3I92H@>5=G>1@=aKk(({?5N z8S%C!j)qRQo3-C*nt0~Wdb3Ai1I4rKP2n#MYh!HH4!U}7Ep4;iDZ2$Ssli502SGir z>sjuXZHoOd(c!_xBlkBmx>TZYaAi6cznilv+!FLtnjSM2HayYdWg-rMh0YK6zMJV0 zE7_1(Y5BT-?vX_UZ_~kC?@PlhvnweRT=XZOi(0$SF(QPgwSSk5T}ZB#8a?>fUmVa3 z+H5X3(4r6h(pi(POT(cUe(Pl?u6<|2XT4JlM3&OE^>8&Jk@KCW(jpbz=5pGT+i-rx3&g$c`ej|iTPlZp-gyl_^UG9ZB<$6}?FQvWV1-rRHx(wW=r zmp}UUD4*~V7$AAkv8?lpuf1Y{YaK~bf5dXOR>0lvg zBDm$vll~1o+kA>qYsg9GBrez^LP=JXO;UMda0`^;op-A4PIvS>w8s2O(#-7$s@(EU zC~+y6dZJE6mGNtQ!RFYGl-Ydv0T8n%tZntgj%iPym7<&(bX%rR3LcF;1Oun%Iw?0Yk#XWyRQbDDf?3k^pOnHNg?zB&%;WP40K7mUKlvHnRLnSfIsr`yj`T zkgrFyW8#+hpT(>4x^OP#>8+*uHoEW`9;}+7gD+u6YTB*|mJ7BQexgCnfe>bH^Zn#6 z1tM>QW>Nn&FP(+DuE~WL_;SLBFkn7wL#jRsmc&O zEh0%)Ak$vXN)`KlDmmDUx|OO6xcWXry|XRvaTNF7ZJ+rVx)tbhoV5P}^ zORs%_$_EEIvxSXoMbcDL>Ul&zc*1@CgGH02-3MgV*X-cAv*4bb3632>_*K_Pz0exg zt-R<20+-`w;xjmCNMtF2E!T7Fh}4O4k4o4>1u~o!wrIZ-?y^B8ePAhltXeWR2NOn5 zDIol!n;OCzj`@ji%!livl`m`~V|(x}AMXr!`p3=JH64J%oycsjpsF?$Ef7abE!!QY z;YDH&&i&e!)Px#<1nhQ-JdrJ72b3mj1mB_53t#VExmTM84Zg%(Swr-=@ zeO>LY7(aEoJ>ZulFNA1lR=>jru)!7Ecnyi_d9kYprm{j5khAXhfp9e|pJ3}oy9@}^&#Hg7U z5~Ke6ioz&<1#8^!SPALRknFaMnf>}Z-Lb(f?Is6Oe}8nF4N8!9Ro37-)P#_)kyh`}m|EOYS1+}%=6M9fG;puwjs2&YKO^0PANPwb z8V`~s5XY1BT*3wLg&mK{O?d9#k2Kal#fklt<1$}LJMQvbauMuH`rd{Kyf3SB2|Gn< zMpl=YPTy=KwwwycXAQUvWg))P$EjtqnnG%cz5|-zxFD_&ZR;$sB945O z0mJ#XdSwu4jfHY436TI0!m6I-4HrV}-eVBiuP0c{Twem^XWUR)brXD&kDt+@3w}dh_LPiKL1Apb{Z+=&?A~!-TL&xjAIE(fKDD!_r;BkOKDb5 zEL!8{;arhmSQ`9)#QO?YUJibogCIcD0}@>i(6?xDi-@b6|14DR2;MBYa@ow5|EE4V z(9V(^Nc#9}QJ5lyH2l3B2_o@-KZN_B)320!l7%US1zLTNl;xm`y|JDqVICD8<&*mw zzQDokla;J>U6?$GK`zpRmK+{R8*hVEBSP?pUF7jOAD>M;au2&MZyRc{#bUvfPTee+D?>U95-q#6TWhU+QG7|L%AeAEHvxo63rSV!N6B^r~^@9B7b#g92q@}17aiGcb3fphnMoJn>tJJ0JE0?zac zQDe%&gWRk3wHQKF>U?3ucK?EHNRt1uIY|2ak(vOt>fMN}6(=iV5_8FTDHh@(ZkIh8 z0+ae=IVT17L^v``zU&w4`PkFSg~@|nriBb{YM6w8OM_aLrvWKel2T@-u@9+7;&$`6 z<`}!i6j0dyW~)q}M*}?3`a)dhR;Y$OS~)QCfk5-|UD0AI92xeG^glZ-J%6(xOMztk z)*mf)0#4+<8Ep%<6;aw0GGHSCYO5ryV^r!Pt{pp0&Z zyn>DyCmxvt`j2a%g<}Q6_CKZ!I%G%rC+|MU^fc0L+!tmO$CckO16+VY%R3NlMmfKeu!6SqrdFu5ayDxov>DE^|EuDZwyC0+6~VdJ1!_l zx>a^QG+cv}JrF^)4RNi7z4wc*Inzj0ajk`R$b!sCG8B(w78;5zcE%j&n$Y+_47_b- 
zs08uo!ODi~HkXL4bT~tTm1BnjoD4?T$(CmrkUJ;hCO5_ME*VnGEYRanNu`S)&%#MJ zJv_C`%tyj4RNNk&kf5sV&)@ruvOC}j)rwQcgG7ef)*$0&{&8=XdDDmpejY)__Pd9d z`a*6&ku*JVR$2JzHuLsCtdsqI>U-v@Vfywn;vW&LgJnl0VtOci!1Ma2vbI-wl=zff zS}F-CmtQKynm^>%f%4iDvlQ%5+%J)M6EHO2Hss0sg=kbXADu)<-b2wiwmvA_s@g+W zim#6-M?2*hw+3nWq(K&4882#bqt%5HuU{L-fq%t;n1sxf)gG9Z%l?3NK4WN(fxT{G@VVf6@~fr#_`ryS|M zF$bF_zVWr*G5Z?2dBKNR9P_0EF&;C4??NMvg)~f7Unqc$_S#XEe(y{)pJ3a&UyX8R zgiR2E}uQSz-QTPAKW?iYtLVB_^fum z8@9O?Ws7$&fWqRN=*o0B&K#^>pGzvd1X3l0it=NZpEy>$yGEJ5N1~2bT9Imt+4;5& z6(zopwYf{2w3W1dI1l;d9xc~(iXJGFv)+Rl30f9MWc*mBR>w}6=GefAkh4&Vut8LB zpZfS)x6_9-BQ|cjz=U*<$M@0p?m=jRYLd)wGLHJ)oW%e-;X(MsN3EcW%JSRkfOH2Q#K#D zluLRN-oLs`((Go|x7H#fFO>mb(ALBa!oHtF(x1IAt8LSL(6`cIs4k?Y-a1nH1%2(4 zD0~aMqn7o2HBOTRxFBnm+N%v>Tn-!%2bjW&ZO8@g|ma|8j4kH!(Lg$SXIyzErCTRLgWpOU_EoN{BXe@ z2@7XQgmj`Hb6m5hZ1DZ7-0Sf{b#amq(pRZ9?y8A?M4dqav3mA?r*Y$Ofhb0Q*z>i43fR3NE_G4MZqn$|G^-u$Nx>V zBriO*xg6#V-a*4WJO-EWspFScL)pOB4B-tO z^JJ^zX&7Set%hsn&E1D>w?D2Lb!t)!snNK=j3k|)JcRSVxNzpm8X6?j=8n%?NuGsQ zRtrBOnF-`FthU3PKoZLaGtAb{b9BFAu;su#|7_%tX!@(&To+f zMMcFicr0ljKRXBqLK2v_prpSn+K(jI=fb5>F;VGVDW!v+Gx)H+ED7U`Lw5H}BI@U4 zUf79x=IurVb}kp7AkVhS#R%00Dc4UNhK-&uHGy4A@zvK{EXN<+E4pJAG7j`9sLmv|pWv)AXESGRBf5@JIQ`Rw&R*KDd!>DAB@fnZvfodwe3#A^ zQ|gUDT5hR|^IP4=W}NXWtbb_V5gmm1;FO&Bqq-N$kEBcmiVc*T@C{86aY)OEmftqN z+p1je$ssMnqB)!-7HxRl5jOjBq_O^yl)=Qf&YMe@you~eMOM9){8 zQq~%Tw84t|Tb|hX5%bQ*hs@g=K-x~)uTyGz*^$XMsF`ex)d;QDJhFuh`=hok;&{o1 z8gwQ&zTT z@`BPpOW4Q45*OveOsMj2M0Ef(PN*HmI|NWLg!xA^-wTy^gcs%7B-bEEXH?i|WQizq z=s&g#nZ;O*y(&0>*!tM}@w*8)|F+QK2@-GG*WcQasIYLavOjSz^6c2n0j_CfL21*&^xY zLH6AA-R}-!`=yem0^lKv%9MtfZxCMGSnuF;K7$<@KWTNaB?xC+J6EemG|r>kdDndz zKJ!baUQvT0TVKnGtXx=juFy2-!EIIuI&Kj+%yd91}A{xu{uk&;9#Vkkmab^{5!BR9=s zV!tgYd(nX+Vq93p45CtCg>QB~tye ziI;a`{4jLp8;_fn4)at}_KK)E)>SvrrInX%oB?9F_d$!hNv-EdvPw20rEbM}CpItD z7ts}GK*}F|;YUB>E29gf(?km2;zryg4cHY70UytKC2X$<q<7(Row3n&*hnA%Pi^2uk*^>i4fXZ*JD$(^33!r8HkHqw3I19vVZuzmkHvS z2wFAb6PEDn!)^D4OWK29x^(o_@r-yCzaI;m+ys)M1Ye{xn$FO1n}AIX$#GJ5p;A80 z&ZT4_1P9;XzE+${$=%V+dye%BQg){CS-|!VWs9&rTwZoqp9h!nZ}CsV+5R^U=|Rro z+1`MjRIRwX<9INto?C4Azu$?+qGUP%aS1dpTM+S%o>#`I#LM3nr7LB}A1!T6tq%{* zdzdoWZq==}#P5HOCjmq3^iG5CKqEHyB_K~)#ou)f=fT$@TN6rP6k0@9l7eI8x*yr!kpnWKtyY< z2uPCrE4*Gh16-AyLi^5S8$N92EmE}w+yUu}OCvtUk+nD_pW;5r_-m7O6T_PLwLY3Y zIqwBLLPdl>Hr=#9k~S|JBP1!^Ii*~mzvP1pd$Za$@g^NsyyxtQQ%$>a;j>!e>_aGw4QW!&Bage7&VO zQxMFGzM~m6)YFmhR`ZQAm0gfmc=6-W@@!nFNqNkONhNr>oSCeh+ea3aUc36`pY$Cc z$+0l<(MX+l)+QQrdy6UcC$b^@h3>w-IqX#=(b{GYk31LxHYov9^fn_zyUe#2HhvFz zcH$++EJ7;G)^hlj@bz0g&w#IFui55$m(K`a8m_WUo0+IG(A&2aq{{U5$Mz+OPc}$$cuhE&iY47)FLa=S0?2B4>-t~As%wckz*~feb2lY#z02b;R&y5 zyG`P?$uLFkRIOUAF@M|dMY!O+TD2x7!qbFhy8tf&kW=fw zh#SQ`BbL(ssa}@i8|7KkZ30M%D8z@rGz%8xUxH_GeOxR|KMvc75-hT8tQ%({kiZgp z5|gw?beNcr_L2?UQ^9gZ z_|=m1WWP3%tOq?6=pF;|@p53ZM|5hvj=tbPePzZvxP+B5P~s%`^;j(@#XD2KS9MwL zNrN3eTuj3lWb$*6RuCm#oEeUb3Vu^)GV`)}4yDvTWa;W>F9e#dRVj>+4p@zOUq9^i zW`1ExwEA4oE~T*)50Ily%~E!f%MvR=ZDkG>_ZZIgZVPx^cC%h5DM95B#7n$AA~=Uk zLFt_h!;6|wlageFe%rO(`nSIjzl21{l7ZV4TazoWeV*650ue>xgJnnq)De@L zjSYHnw{dp|(|4k4kbBej2#mUJ zZncIZ`G~ybkdM8U5UZ4glkt;ucBDMME01BSrQQX4w*`mbE+<#8bf@_sNfAF6E3HVW z8xeyVncpwp*L)OIci=__i;G8hV36mD^ZXc=A3AsoV{t};=)jK%()l~&Q|-md1?2Y9 z))uo2#>tq9B^lN(-HCRgMx!?&55D>bDjd$7S0fE^d@kK*xBV3&4yfdUv<`sr{)Yql_65C5M;AI|$gM{^{JjJRJ<* zFQ!8p30=!x%@cn&rE_3rLzuo}NZQw^gUvp68?j>R=Sr-RNt3wJkxT|;CQjgXjYbC9 z8V4Inf#0`(YQ6RpdzXv}|ROX^mki*MMSAa@|}f zxG%`4Ze%U~(_ubjUCnX4u8!wFcj}~`OrlB=Ac6-XRfTR zi2o#MJyOI-@@t}drEP^i1vr^z`3)JYgnG8dlsszDCHuG3QfuH&QWFPHv2xVW;;s(< z7B>vB!g&j9D!ij6_mwH#pI*{9qQJiuL(nyPRj@{0S`+FOq4{ujqeK%#iI!WKC=ryr 
z_Xu_VHD~Xv$%3EKjYH~V9}j*~@*L|aJWlZiULLwQ_Uo6yCStN?M)SpA<$!vmBckB! zn_mJ?_*jwXW{Hz93Rb2Ye6SG`hv@LHRYq#C8bSR0Ie z5-aljs1A?hk@8ka=IyWMo4Aj$9y&H$5n_?J;U9xF7|7$d-`y!;%vfp0VJQ;)V z1t*@D#0=I7Ds`$oU(>)kl#^OP5X-nS;F~6scek)r>c>IbW*H}p7qaD#S{S|Ijc5)S zP05l_Nw&{F$i_c{kjg$)iO9T*|GMz1Dz)TG*ISern)W}7^p?&WQ#8y4?HQ&Y&oCf% zQ(|GB8F|dl^@Wgbg`aUl)CDoQBd1!UniDE1S&UUxk5p_-s(&8Y5?y?j2c@XT9V=Li2gP7J+Huq_Ej^j^&&JgDo!i)kAsXK@e>};BRz5F#_dPa~M6|W2Tx+qVm zV_YNFD^}h3s-g`soEjhxVYVzVYT#IqXW3bJAqPTBx48Oh5xM8?jWV}_ltD-n!9F`+jckK*AJu*(!VYc)h5h;h{owiOD(i~o~2%(D& zB7KjfED7?fnx(T0NyUo-O?NHcBBWt?U-mQe<&V0^S*@f`<;()!74^wv;kE&(Ns~UR z;*2fE73fv<>YrIw?JaruQ(Q}v`w}u5!T(jFH@ebc2}tK+-Afyv>Okpmdv-k$1`ST9*L;7I3t$aS3RW_HR-^Ey7>!4ps_*xuqyI9 z;#!Exgy7s6F~mzbT{!5_<5AC$%7zH78Q~c3I=|VuA8(DTq-Xes_cN1xR}g0~xMSAw zfl5V{66e(MX;+`jBgpkg*@JtJlo!}1|FsWq(EfZ@8MGg1-`Z8^8q+jWxjvT_JfQv0 zA>TkUC}AVxLqYF)(r$(Q_C-eSRnd+!TULIO>(#u3491&-rq)+}V5?*XOIpc!#Up|4#r z$lYc?F9%EepW-EB&oU*=d=yDLo)P;^Xb-Ec?P(c~+(d@S({YkN0L&S2iCi$x->#dS zEsaTfkePh?v%O*Gr}=rX0Qss6)WNqtmi!|PuvOx1qXf~vqmF*r&7ez`{t*2t4_NdZ zL$1elrQdn6}I_pVaZ4wo8QCX>C@LB8Dk z2YKOSFX-$@CdtMeu5n41cKD^X3;A^v)TzUB^dS3{Bywz#Ie$wyCTX*9O@qwUN6vs! z*I|YgW{Of{pM;KhNsT>RHhwf2Oe#y9K{Z6qFO*W}atYyE5L6C`Eg$VJ_UHQU&HwS_ zjLiAz@siC!vBo||W#I>Y?2;e(>cU9SxGB_qT~KA?K{53zpZ^a@5B1frFr7j#zlL#_I$ZQAoLe65Lf@pL>n^;1uKmUf@rt zYuI0w*K!d2u2pVwmNiN4%=MDV`9(F$1eGk*(nxuru51!E%xDHW+h z-a&E8aUC8pb=q8`#2H8ruUu`?=sU9IL9x~b7Nx|8Eb|kn9~1)?6@MCfx(=0`f&ijv zI+($+$;Y4nmv4^rl(s>jSNN;`_Wicj4Y;n-=-c+8a*E1PzxFa8_$9``#{&8kAu6B& zZnq?wZQQ%{EVf(q#=zkj%8?<;ImRQzFm6bOgeK^QR=2+N>hmdVft$X4+2vZ9XuP+} zc)LC4>FuLiGTcFYLJeiEtJyFl9;Hg33{ap*EoYwM!HN-2WWBw>gI)TIjoA3{AQy40 zVXiqL&y#Oj@k;jZP1p}Jj%8zwdwZh?b(p0jR@mO{ho(Fy9OHtt6p0SpF|5B2-s*Ee zanU2LOzB=-?{p$t8)Fh{#f^c0UoWa2-Gdq|JNam_&`;_zIR&ZU?B*w8O?l5mJh`-} zxZsKAR-nleZVC~K8zR9fzwxdhpS|R?F}zvoJ@I*zr(%;mQg-CGuaGGDWnUTSclD6GzV0yl1 zQWjjPzqYSwfVav5e#X4@u(LeGG>+pn+q+w`O?XXgbUIy69Qw*U(R_HgHu&OdL!S|n zqV9yxY`_6HlkBoIS#kGT;naS!PGKkY&U6N;^MA->^|Qe)bnH9z><* z>?U$`M!VV8)%_(H;A$g@2T(`tS1YJYXywh;Uth_@;`Czfr0!K&SGkn!lT))5Ei$@CC&B`V0-m z;7F{Z$>id)_moAoP&G7zP7I5#pD^AG>8)2+b|VSm6;Y{FqNff|vj?7i)M>bMV}e{m zlLF#{0x#N>lsWnme z(*8!QgE00iHmUgxb>Z{Wo93b!=)hN4W^uXV*KCVYg_GZ7F>aDdwlDGBRtefS2E)-P zkF*N{7E^6-kJCYtW*($$RnH6mXdI>Ql@Q55ETwzWmh$=?#~~HbK8Pmx9)q;j?9#)w zAy+-C(kP-K%%M^$|l=+o-o1BCzsBef}>??oM7QE)0+GC zf?x$i^rIO3=XBh;`M@ZvHC?pNuvnv+AlLpG8N~MZXt`_-K-~AO<*I^`I%(QxBp+M( z>}%Kz!Atq@u#FDFvNr+I=eXYGW)o?E?jNfIa7WSX6}ZY)|SFD>b?$k0|> z)+Z&jc!rHQdefyAViCl^`Sio;N%A*r{oHe*GRkEYV(FvUAkCWAtDb6?;O*&Yy|pa@ z%-*xaQX>#Qe=CXJ=kLwp2}>0Al`R0XHpt}u6LsjwyGh9~j1B>$F6_6Z?+(8kr`89+CV!M;AXYl#7QRP#KzqKY;8-LksYvPhG;>vM?M#Aef>a*xAUpy`&OBw4>TPz$ zlC16^fe4|I2-*mmKVVa^Mq{>xx0s*0ruDj;9`@b*!qaTAeI(}y94)1wP@p3O~1dkUXdspy9X-m z-zYxhfvo@0&iS1j-gz+-VohrYrr#suXX$>kfzlf%Cnh+JwSs#{>Ajk%GZ8(~*ewCE zN#zjN3Pi{*N*wLPiH53~c@lysoXy<^dkCVhvLswE`VCVwPu zt$J@wr4C>lF*mUwTqOA)V+?J)HBNy;Ma;r|XjLW3#7>2p2GjYnHR z0IpAO80KDI4Q^`Ll6BiK)Te@6+FSXhF-c_G7tL6_sdrJ%($1d7idQ%QZp*yp|*p9Hyp3kI#-o~M2c zLSBBC)*fUqc#2Ry>7>JxEmiKivBkIelZy@4_wKR)AeGt*soG*#r3du`MndgFk3|SQ z>)-xKBUe|$rJ_v5j*RiRpQ7ck@$(2&1|#+*MFY=g$z7mP8|_=(H2<25EmAhl$;?|E zDPpgY1K=1&vPFKq!|!X&Meo9#Y-w+Q1u$7@jx{;ZSU+ho%|N=(O2KDIPM>9yA=@Y)~Dt&}Lk*sMkd)_Oq{C$1n!G0C0Rkv(zkLsgv^TgTC+b#D0S8 zkCUM5pubg7BIO+^Wi9a;>anAZAo(qH_dDS^x#F0q4*Lyw_9fdjK1m;yM|xYR&ZU^q zAK^0ZFoU@%xJ5|Qf*<(6o4-d=h}s3NdimzXx4Rcy$`veg4Ws^ireT z^yIESjvU@Xv-s|BT$JByDZkKJAg-ijI2mIPB0laJuKj{?nQQdRSJ!&-qlqp4KMdz% zM}Lj^T}cc#HG~87w7`${SO4`N%YZD2u)ZGNNW}8Jj$n!)Ke}UTI6>cD|F7;z99d61 
zxR~G&@ekO{f?t1Ksag1USK>3=Sq5UAMkM@#Xt%=pfA=C7g8Esup!mDX$dxyZNDxAoBm~c@lb}l@SQyc6+~KrcMzh{%D&aV`x2T(P9da>J}3cDLnA^{T{I{1zZ$9V7BJ8n z-ExOi*=lRy*M_}nA>%A2f;pNYhC&+vpM7X9@_)4?RqkDgyWrp;|#uK`RN9R$Wa>6o6B^g*YM1*bguW z0g}~<5&S;`!VhhafQZrq7hp`-2!T>L$b9{~&+^X-;1h><;LfrU>)z88Np=PH|J}bT zSH0H*Mhkr&Y@UV=T6lFeGP(7mLhn)r>H(4H|F7pWU{Z~T2h29~A*5ar#BL?}`}gOG zMz}L5?K@Uzzk1Icnl`bbw5L^tkwjLd(=!^KngJZp9W}^zuO@Z25Qjip095f*#AG)m4xLhb66w^P!O25L`ybWjg&>b)u@YiPi!? z&(rv+uqr=~>G1S<;Iu&Q)oF6SZ&kTZgAOPz3qno@DhGjI|FstMe-K51o`P7K*nNQs zEK`bkRZ87JMQ_>AxCX2yO9_y4a#{v4C8!cuW~tGtI+7l<2SZ3nI|b7rM6I};|DJ94 z2dHj*RilO)_%akn;?P3X`=|}eT@@a&;Ef)~zXvG$ z%MS}=C8@6Z`P(Tp(Kkm;2wEipFSc%=yLta!SrwMHdj-cpEE{PSYEb@M4#;zOrg_H9gE_kP{Cb&uD5TYu>KC6?yfruSdo;c6qKj7=8UpJP&#vH8*H zRWE#x)lt`78LKth)pgVe4fpA-N=>HU?jpk15JZ|nt055eNUl|UhJ?N89y*QX8;}%h zRNGA7-d@FQk8gmRI#awP93)YB)4sf#yji>v8-45fh^Z=LI(b-`W?b)YUd3#8mrQR! zW;J#<900=NL@S!fJ#sp_BZ6u0|>Y{_#Irm2UsW;{LdIUo$XLStdtX z6PN#>1+{knqm6HA2*4;33%0C7mo3GsLJxKS-E}rAu2)BX<`i<SL}*YeLw5!xLOs1;>u zGKk6*cB9G&_yeN6zS{rpUBxTS%QA^r=SRWsi6#0RHA%E^>%V(1MQWFcQR0F5^Xp$M zot+SV;VUd zSNr-AbmfJxa&zGqiz%p*zK>r;Ns0Z|Z%glElK4Rb5k%tZYTcOMg6fs%G9>YXrdXij zr^m3G_^m}ns#zSP6{&(!39hC8->z8IPw9T6w}@8g(ZOF&(OF6atonIW^U=?XhtlSG zYqgF6uf^=&d1^t&Kl9wM+C1NVcu;&7vod7pYC|%=79&TxWHS+TIFpB)P;Ve#dNp0X zybpyuV>aUVSvbRj=7*$I8ke$)P=fuBMx`l!5)&bkrgwS=R-Ni68kx``Ptb;3 zwdcVIZ6CwNoDGPM^-lPr>Coy}D#a~d8|zFK!)QIy9{d$g<9?^X|Hcxc=L%B|Xxukl zMLkVVxBuLC3CZ{f2>BPPAyTX0zSLTjq=rD!&nQWe#K)_Xw0ec41}h}(j#!PP&5iW_5^_e5;tT=#01vhK)r)QX7<{#HQ8oAv!6p*GS&s zK9bSCvi#Y&)kON%8!_^IYW4=?^YW;bhN}X3)v<3YMo+6~g;nG3HmiiRX+2hiF%L5W z)|7pj2JcowKK1&FGMmjvmOHzVKk7Rn6|15t)3=RX52z|^L{&=~5%N}_E}_;rIPcw@ z2K2=RB^FLZfQ!|k~QB4%8x-X5o2URnx@hwB!kX*D5iE#QuhNe~El5oj6*iOFq z6rz5LX4UcGtFbru8@M%|DNI$V058h|M%llLE@<6K$CR5yBGGX)u=c1}-TdB`NcYfg zQ~+{Oz@(o>HO!-8HC^cQ;wLGHW297RDm#8)HC0f#uiatn!%sq;D)|-6?!L5|)ZBgr zH2o$+qv^ZVXv+6;Y2BbvHhlcgfc>iu*w;g2)AEqvtC49uqJQI8XJavpl>VPF>sM3S z*<1gwy=#xFG5h*YF1ZzrUy~@wxD{PQ)Q~#l7BNkTB$8W_O3_`O8Ka0qrJ7QmL7|dd zlae#jBnlmq+`1jp)wOh_j*@rn{hY|yd)`0Z&-?kjpU->tKTgg*`|@3Deb?H1uV+6A z@^34(pl(?06s#y=lPT~wMW_mU#_ZWC^?*&)ol5Mr?5wS$iXIxT8hmAl_a)`lK7IZQ zKhaynAHE$$*4{9=32F8ScKNTi(dcc!gF8N?BI=( zk_L(G&sonOQj|$B5c+l6GbeW+qTZ%yH1S;4$)BjB0LM7LpGbS2_21NWn!#@k9?Z;h zJM8uyip_EoXd0?(0Q+9!;nwMT49^RiBp>zY>BpRDKE$=>5=3iHaWb?wt-7ndjYka^ z9Va^zK@ggFeDL{{KDg_;qYGf9gpx5-9&)cY>mX`!d0z!qK^(HP`cHR zcD!<@7@Xb8XK-l)-kr)AeiDs>P;w5yWFDwtNXphx{H&-q>bJ5T-mropeD@ZyGNH6x zQZ_d;%al}}k`p?wLmtB+n`Va{fB5m-VjE{EvNrxOdrLg~^d%HG6L=dQ``Dz@9Ez4; z>?^8u_0$e3>L^D`ma7hM1NAE0YAy|`s+EHp7K!6e%L)DV2mA-cIu1sQDGcAXGURFU zM9{i1$Gf!7^-drj56aK{*BG~|t=k@CI#7v+oZ67IBG5H7e= z7;>f`MY#4+jt%vjFvfR>;ssu|H{ODTU;U#9mjQ6Vi@fDKbyEHC&hN|BKaeeozdu2t z0fA-#Z*hSB&1jHa9h*P#cJWEv>@|J3yc5~W%V3I4^OsmFM09lg){D1Ew>fk@RAoOr zv-rn2rGIXp8S-0VVp)LOeWu}}g>9)P24WXY*Um&cs@Bw-=MI>{Oxr5us+SKbcFT)P zQcV|ot^wNMYt7xTtQf1@hiRK*=k_L!hJVCb;dnP+y4BSlvWSOd1s+_=Er+E>^FRvtm2p?lSRy!y#&{cfx$DVAWD!dN^ zEvA@(#dN-&H!|P-*Tg}RyQPe1oN4s3op^g$J()r^?7Bmash?t?%YkIC?xcQKhTC9 zhlj0lL(Du%gNtr41&P||E+OMvuusP+eD7p=M(yn<2p7-j2i<&K*hLEs6hcaUDvu0f zTzqqWhd-&)Qu={hamNY>U+u1 z`gS;Zhll-(QW9>Vu@=zuwyDfo!ASCJ&Gh9xSzWudkGC<1w1ufC|Fc>RAKGQj2dn7T zTpW&kT4duI;7@ZP#Wj#i4H=w5iU#-`RL)xzI2IYdL&03+Cwcy~C_Zp}{=}QbTXQE& zfg5F7;f+vS)3n{$wsfRi#{)isNzAZK`-m^fTKOl%o|{6I^Cp@_1Z;wF&+D={UN&7_ znxd8ScAOhloNWcox398kNkYN6(3GYw%6a@1q4kIsdm4lzOrG4L3}yQY+R{$%U9mG*og1~`rqhYFs~H+Uu4=Axv5lFxdIwFE8+S$in) zMr=0pUXTlst6s(f(?1CQX6iuN0V5qgi!+d9Bx9`CP z_GLQ$x{~(%SHx+8s(9+^Jz!qQS}PE5ly`bx#-yzNe2PAbf|=Ztl#ndoF!+c1u~k}C zd%k0u?1B*GGiHwDS+jTjCmqeqF@nd@Q*+>YUVO|Vnn#hB&{jLB;Asv-?t-}{HEP&9 
zbR3|-Kdttbdu20{ROqlS@Y{w`DE4B)QxoRVzA$Nw&jjXEsQXNWW5bX&bH$ZfJs7*p zn?`sa)n_2B*;xQyz|gzd*1Z@nXLvCRUJi@Cr?N479KyES#S^P`I>r|9anS~eLteP!feJU&5YK>J2TG>yPdtd>eum%`ytbMNa;d6$mo+vf?d48K&Qae z^h1q)R*{ii)x{N|FYX-KeWt@?V2D19WBT&m%)viC))e2w%btN?nSMv2Zga(1 zx9?9G-7$i9izRF2G}OCUslua=>ssL*rW!av>YxZ^yETe2ceysX`9`~4&aJ&!>9MMD z_2#x0^{*zG6=mxksFUNVndp3UDNgjRw??zNew?i8!*^soyr;MnF{0Q5PI% zq%98^`Y2AK%RkfH+6lo!k-z%g>F~VL$_-9Dl+xj&NY<1L`}!B{m4eFpfTA~tpS_Qc zZp=x0?*G_WMdI)KePpZ8Ly6u}IoPL(Haoi*o8Ri{6Pt&9i$?d)(r{EcFSsM;d(o~x zg99!!mc!!G^ZM?W&+6-*IaD^MLoUZQr~S zu7=lD-uGsH2vkYmUusp^tZ^M>F}1~oNjz&pF(Hj5J5+vzwL)k9(K&CjK!ZMbJLt!y zSJ^_0vu~yNLuGMs?fa-TYlhrtX4rZ=9kb~*#lfE1Ff?`%`B;iKZs+Er5=Faj-!nW+>~h)46=zth^WS|db%l>dbh zJMCdREoI2AV$*|v>)M~KDma`oGuQJ7!|F<6%&u45M2)#vQ=mVk|GUl6A!}}wrw6_r zlBB9}eN|2NuI4qm*AvVn#U^t6?)#kU|?OEsa>o#e-w%UyfDEckCb8_HLNz0z9 zsf=iEhnsEbe^6w>E+Vgbd422`2>Vy5oyZRF4VVADR>0{dWwd<>c*V3%vm(~t%M>i_ z4G%$ofld%^;|)m%JF+hj>0?TwOL0WCDk01jP@4`&W?!VR|YfGa{>ZH5@w4HPDlW zlNnnn9+Y9O0N>!9_KX4KIt`J^dA#26ctzoNRd6{0?9MHx%wP(h!30=};QbjUo8luO zWc)nMm*M1TUadn%xot&VojxRY3gg*clF>!@>Eu1}wq5UD*PS{ydO%*&_UWuw*3s9QTI<^JhbOKn>C%P18u>*{h2rOR7rOV?188Ck^f_)@Zl!T9&CGpgjm4oX59zh7K_ z&JuX=PF_Jf8t1(UXf?k@s@BUp+9L}G@G;ZKP8ND?@mFu0g{+bdLcsA>Q9IfVTiiEf z#Q43DUMzjNu&@@dD>VC)!MMnWy(@OXnS$(agvVam4iY0Ue04(&OcMtc^C=Se8A;%j zaE{belxSH_4ExM?epWdKb~c%)SmD(Dz9#{WBVDrEOOe369iT#1zg~ze!Vep1jwjC) zW>>Q9R}b4;@PV04;j@M))Yd*AZfwK6?PNhg3kMsw<;=v^)SFyqZ#qJ@|Is8VaqE)l zjJF*L%CdZe6f+@3P|#(8_>f1|VI(FeN=Di-xF&#M_xVY++lh{UwQ-k~uOuJ5vKdnR z|FfeDw2L>%`gj-#VB*X{+R6Mt6E@ARFkii8EObb3i zM&Z2K4=lh`W|t?ea$OHUpd@JVT(AHHemMis=$Sy;mL4Oje)-7+Xw}%8=sZ$%Q_os& z0&cFefD9Upte`F2BMW6*!&`!{o6MMT8g>8A!ZQX}pjHKkxr}K31MO&R&8%5FT!la= zKR*XH63zFj*2N6oWK;Xu%p9fCeQ0ezK4uho++PSIm;229OeHO2j1d&S>hsnH_o`0Go%!xLvJRf!5^0pGevll} ze>V+M6^{D~vX1uMa3Q7`R(c}lLA5QYIOR&<)25`dIHL+ooJt8Oc`z94&P>zF4 zDXC1yo+0>-Jdnok0hDbX2+NARs^h`)U<;yEn_YrrJ3(iOUKQ4XHWLbTb+cg_s~b#e z&*LCL#EFO;OZ3bpbO$~9?Bt_MQUwGHzgiIzu#H{?)9 zGaRMI2mZY+2OI)tY1Ep(f=!yB=vxjwMZ#87Yv|7{wb#lWOEN@q7ym|*lcWZ}UrKJqX~h8auo$|GSB_bY_3 z9c#__pL9M(gJS0QkSaJCRXX6UI)|%Nas2qQ&)X;n{*_VRB@}(u^6SSivEFb150xC= zlN(dY2kT&y<4>+~-GCa4F{O+Roq_W0y}9W=C!dJ2T$dL|2su7GMN9Ny_%>U5;wM>J zDEKNP;$CDMmWlV0_@)%2_#2FZ-T3l`U9!~7?o&K{DXb+X!**pX2M6DS(jXrQia)en z?i%NTTBvK$`+t|?C%1W&Lq(;TU|c*OXbCT*ndcN&(HFUjU%a+rDQQx;WHwp^YNls* zImT@i?fn^ykUjLSr{8Y0Mq|#AtHC$PPSOs?Qp7C;Ra7eU%fUy^i~+^;e*O=r*dJ}J z>}sje^DOBx)FtrRE&*_*9i#b6g-<+Z{Eq_BgNa`iwy*`kk^!N`Fd^cdy;O(i?(%V{ z(9Qxya7u|}N9OzxW@9yl)3{KI#=Syw3K^IC=m8wwwYy!e^CD#z@zM+M z5vTqNWsp7 zcSDY>CIwc;A}s~HVcw)imLGW2=t)@f1lbh*w1KrEyX%~HAESP~)~W{G+35dh zl;^n7Q4^CKEW9*_Z=Pz`%cy~uF{LV$JvM-*l>AbJ^0E@iR-ODxwk*Qe|`D! z05HRT7%i(%+G?^Xo|E0OeYoORBl^(;e8RUquvIM`M|?hjH~SWhI-l!F_zShC@mMUp z3CCevUzfgG+2F@wqBpOuEJxq+ChR8G0~J^+?dmAGt8c~tXdQ9fyi&>X(mWOlt-E>! 
z4#qG#%png7b+)R9pKolm_i{9-jHusXs$ZAc9_3pvK(DUfadR9p8^>5*R}^(h$F4}0 z+c68!lD3Gjvoi)@c?kF4BX-y;Vig2)TcX+333atKm=gR9u6*4e{-4+392WM&>`66v z6CXn#%Rg(K#?JnN$_n+9=U#48O;~u&D0_9tj5(VpUHL_%N;^wD{g+vB|QJ&JL3FM;|DeHE?keWGTZy_)cUKc8Wy_MIRe( zaHCFov5+|GyjAl7f`w~lS$g6f6D@o4=^$9$#jR4RmVZWWYe`zc@qK`z*(1et_V88MdTA`7Oa(z2nIhSjTa;y@Xp%FHAJfuQx zQ*(?hM2+z5T1Jx^13(?irE2u*J@~w@i%`iNcs{Cquw!fy`j%Hl@dw&L@O#}$cZ~WO z=q90>`~fu86+AYnOI747lXip@0$W17qVmSh>R)0KPT=+yl1B(jr&~J*7}(!Jm3Tw` zb$D;a{Aq|!YS?s2xUmG;gJJp9s?mR5jKnIZM?Qgu&gQkoxN+GP3^~FPGB4gZ9fLcg z9Wo_jM<$2jWt-Kqoe(^eipVK9C}!g%^p$|}z`r3%2j;K6EHMp^QL)A~!^kiT86-G) z-`mU8Fi)^+s1ZuAXPnz$N561vGl6Hws;4*+%)*I1J2Dv`o-s}SWZnU?FLn{{9B816 zrN-`hNX*Y=kgJ-#s6fsK8Z9wX>wh|MKywgx4;Hp7yf!q z99D;x9sKx+9Q#l>ZWwz5(^eno(AJf1z~syH0V9 zo_jZs?2W3b&!rJpB3UdN`{#C?49TcdR$lPBOqU~W!yH%6$2@lO z9$5*j&lDVA*&7l*J|lgRY~@xNKfw?W2PhicS&xw}A*(a*7Ba=)cQL5*1-SEiO6Nb9 z0%P@gt6K%nnW83-E~j~om9BBn+|>wdAa^M%(lMzZ;!dGCQZ1>a%acR!NEc1maP0HR z*m<^O`mjPz-wWoM)WrqGL}6p+`BG}0N718*#0%3(wV2XAcGXtv+erGl-rEe+Aq z%aU|+%gLB(Xj$G?#~@wEDZU-)3m3*TQ1&(=oj@cPZ=^g4tLIz^<3+(o6U;<7&`9FU zMSp@V6w(wX^6j|9w(q~zEW2<96o*+kUV{F6bUFr{k?B~9( zQ*c}>4+?SIqy}t{3ul;0N+wPc+NZjcQRw|`7vuO0qVPyETy_w0`Hm4pKMOI{1^hw| zMByCd`P8Z%d!Je0$We5hLL;#t9IdRL3qTCk>huDN&;TUIgK#q zZy8aIM(PKj%30jZl_@h12lwJWDK+3B#PpcP1lnZX;G^RSQQKw7#*xWeI4$xdA3X3Z zz;;AVof^fdu<4}sJ(@b}Nt==|FjJBaPf4I!WB#o{zE`y`j8dw>)?{nskA@sSk*2i0 zd`hru0bmFceQ$!QNquD`CCy_@RwNv|t~W~X9^U2CBWZUM@jQnXV>u6_?8(nrfz76A zqg2c{D8thRn-N@}lz^|jHKd%%EkH*SosYNXW^oM9SENWd1a!O2Ec;E)3#F0>EMlG= z89av33X?W=>0cU~22nwjo2v=FG0%yz-4^KxhBCZmr-_@29)1m z0}bvZAvIvGQnntGu0+BM2YhaV;aA`daArI0SrE45${nLY(qNpqO1XDsQzkYQr_)<= zeRFMrCg62aP$;&)7N(?oq0l?N@sml5DAzeHfCCKn@|9v3sg%UqD7gob{3uwi08cD$ zf0Hu~8w(Ri3AG@51A?O3>;VQ_Yw$pQ)F?y6jb?R2fOga#=>U`3V^Q(8$x)WQE$cL}V9lPw)p^mb^~+ z0B>i+8o-LlbS21IG#W?k6?_`d;sdobmo(K-Eo~!=Dnr@h?26zBk$|?|H$tYd;h+>i5T2 z9rGJ{w+vq=zz8Xc>7pZmZrZy%zX)ful+5;k(+Yy@^5yMMb1IxFtvM1|O<2 zJLq}ok!>^B#2Gg2_j0n8N;tJ+0x6!OL$2D7H}6by#WLt(&fd+V`XSI5H_B?~pu-Fe z#ncb{${I;Uh3opph-ti@ujYzXN{gH-BVZv5B>=5T2*vmuOzd=^`UAC^_Op|CGc{62 z%o9%y$2Hxx$fm|ug8c=f|vH5>VNY>D5g)pNyBb2)`GrkuD6nX~fm0yUnLRqUB1 zn4XqWe=es&4cswaYtn)cc&IaMUY@hrsa8Ly#`boLkqv2ETTG%_1BxK3MvvmbW&`CJ z;OzoGllP`6Mjg+*uG^3Ab*9O0qFd{0d2zye?40V9nrYUIYKI;FuL`@|SLMlc#g~g(y{oqNtE|X#KTx~1pnm-| z+m!=w<%gE!=lXn>XG>hBspt9a{~&(rs?fCalD4tfPRGrse_e&0BK~P2ACiyLMOP;Eht0h*sg#1ubP<>!+j=7H8bX&Km2b@?w&L;PMr9bq3;AO?ju#%@H z7rlK(T(5efGNNIvale}Q+s@}J%k8-9b@##^1)(H9ay7L(s(v!h;Ih%Q4`Xu+wtNUU zY7msUb82fu%f=0yqBd>%4xb}^_50$Nbr+twYm}>v^7qz18vH?$`pW){xw_j?a|O5T z#I@%ubF zmX$dPH{wJ->S9*?RuM3DxL$DUz#B|eg<7D;n?W76>x8Z?f3()_$$qu|+T-_OxB>RX zNa8`5bY8r={X{;Oer@#)?F123&>Z!zP0uAuk87GeKt@N}@lY~fNWYy#kq!6tdfI^& z{`w|GE4betl1*5OG5$v;;qGWKJef*&$3ter-O*qe8EJiihm57afJ5e%zJNpKhrfVB z7I=IChb-{;0uEW=@dX^Rz~c)zWP!(*aL59WFYx%E2OfdvybJJ_%2YQa?5n18yqq;x`83njqYH`(2$|= z1p?jwknx5LjqV6^|18rD85-RY=>A!z8!|MyBhdY`OgCg`bVs23XPIvNFQQ@P@2!KH zpLny%PbGT+muH6ilkGxvhh2t-42|vxbpI^V4H+8U5$OI|rW-Oex+Bp2vrIQ+Xmm%Q q`)8SM$k6DHK=;ow-S|I&#-jDso*%!pou`XNV=-s(?DSa

-      Below are all available downloads for the latest version of Packer (
-      <%= latest_version %>). Please download the proper package for your
-      operating system and architecture. You can find SHA256 checksums for
-      packages here.
+      Below are the available downloads for the latest version of Packer
+      (<%= latest_version %>). Please download the proper package for your
+      operating system and architecture.
+
+
+      You can find the
+      SHA256 checksums for Packer <%= latest_version %>
+      online and you can
+      verify the checksums signature file
+      which has been signed using HashiCorp's GPG key.
+      You can also download older versions of Packer from the releases
+      service.
 
-    <% product_versions.each do |os, versions| %>
+    <% product_versions.each do |os, arches| %>
+      <% next if os == "web" %>
-
-      <%= system_icon(os) %>
-
-      <%= os %>
-
+
+      <%= system_icon(os) %>
+
+      <%= pretty_os(os) %>
+
@@ -40,10 +48,11 @@ page_title: "Downloads"
         <% end %>
+

From 3257b26fa773cd2df5de3e23279e62f59781786b Mon Sep 17 00:00:00 2001
From: Adrian Bridgett
Date: Mon, 26 Oct 2015 10:20:49 +0000
Subject: [PATCH 885/956] one more place to check SpotPrice

---
 builder/amazon/ebs/step_stop_instance.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builder/amazon/ebs/step_stop_instance.go b/builder/amazon/ebs/step_stop_instance.go
index 77bcd2d7b..d6f135368 100644
--- a/builder/amazon/ebs/step_stop_instance.go
+++ b/builder/amazon/ebs/step_stop_instance.go
@@ -19,7 +19,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)
 
 	// Skip when it is a spot instance
-	if s.SpotPrice != "" {
+	if s.SpotPrice != "" && s.SpotPrice != "0" {
 		return multistep.ActionContinue
 	}
 

From 264e345827eb528c2abe27f5af81ced68fb09069 Mon Sep 17 00:00:00 2001
From: Seth Vargo
Date: Mon, 26 Oct 2015 11:52:34 -0400
Subject: [PATCH 886/956] Use vendored fastly logo

---
 website/Gemfile.lock                         |   2 +-
 website/source/assets/images/fastly_logo.png | Bin 182835 -> 0 bytes
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 website/source/assets/images/fastly_logo.png

diff --git a/website/Gemfile.lock b/website/Gemfile.lock
index 8cdac0cd6..450e67853 100644
--- a/website/Gemfile.lock
+++ b/website/Gemfile.lock
@@ -1,6 +1,6 @@
 GIT
   remote: git://github.com/hashicorp/middleman-hashicorp.git
-  revision: f21146d03182b7236b85ee8bc430fd613125d5bb
+  revision: 15cbda0cf1d963fa71292dee921229e7ee618272
   specs:
     middleman-hashicorp (0.2.0)
       bootstrap-sass (~> 3.3)

diff --git a/website/source/assets/images/fastly_logo.png b/website/source/assets/images/fastly_logo.png
deleted file mode 100644
index 0c4619e964742b43dad160839a7e4f4433b53790..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

(binary patch data for fastly_logo.png, 182835 bytes, omitted)
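Editor's note on the SpotPrice change in patch 885 above: Packer launches a spot instance only when spot_price is set to a non-empty value other than "0", and spot instances cannot be stopped (only terminated), so the stop step returns early in exactly those cases. A minimal, self-contained Go sketch of that guard; the isSpotRequest helper is hypothetical and for illustration only, since in Packer the check lives inline in stepStopInstance.Run:

package main

import "fmt"

// isSpotRequest mirrors the condition in stepStopInstance.Run: an empty
// spot_price, or a spot_price of "0", means an ordinary on-demand
// instance was launched. Any other value means a spot request was made,
// and the stop step must be skipped for it.
func isSpotRequest(spotPrice string) bool {
	return spotPrice != "" && spotPrice != "0"
}

func main() {
	for _, price := range []string{"", "0", "0.05"} {
		fmt.Printf("spot_price=%q -> skip stop step: %v\n",
			price, isSpotRequest(price))
	}
}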
zu5}UJZ+|`|e2Q1`;y7hlqFyN<5ac@MJ1)S&jxX z;xyiA%R541n2g!ubdpO@xf9-sddbqmVgz{XIg~ zzo$q*^=t^Rb+Q+T)BX2a9U^BV18N(Iz_N#6ZGc?rEEDldfSZ^ZG*S&Tbs=D3o5Si} zIPxMDn}>F{VEVPib z4s0T^A=D+3L^?&uwm*Tc;{c&T+#V1qKIO*XU(N(It&>8?nILy-5yZ>znGmh5 zAbGuM+gh*!^mZWBztCk!nO#vLxb2L8lMY~*nGgpxh(kLWLY6stUX2+UnMD{z*56oz zAhPkCh4u>lFUY(|Z>Jd%W)ZS}pgI#*V~rZEN9~|Dh?oT)U`@&H>pe~?xXa8QK*&wQ zmLQF9VqgsLE0naMJ4(7m<}fo-dpnCL@F_x$YkfEiH-{ChV0K0*=b$k$Y+iQ4ObEpa z%_pa>wJ{+%Z;1l>-7ouRAmj!`iKA`(4kjcHq&DO)9DvADo6lJ&9#)RJZC3!y28>#` z&^`d?X>G4h0$~{v;Vej3W9D1qQGNo3RTn`o*Sl-{q_nMkd^Rv)7jdG9AXJ(HASwVE z{&soW_*&Q(W-f+JsO%Q(3xea@c$U+wAYzYI6Cwe#IK_IuM15<&xf?UEO6CA1hEx&c zZmSMJ%|}Uswf`QfH(7ot0Ha2-g?bwafNo?70hW!oepuAEgP=SL-wb_2fggr*gHg6B z1YtidN)UBaAD95&Z3NQ!mq#o@Nc*7uGi+WEAu8K?4XF$l(YhK;iX=$F)A?${_EmNTvZvJPiBVLbB% zl7$aTGdp8&T_(1K4ggnpIRr{@xrm?S5{6?VWHl(i^F~mXa#t`T#r>K@ssH#(;5g?8 zPLr&|XCtZsg!RX9=wRoeK?&6#BqRp(LDq-Z-RED&}` zB741C4iZK7<8Z)^WkAFYeS{*Og`Hy2PL+gaSO&DEnt;Zd>r8+_Ucx-}4niB7wyYwk zMMjGmiQZ3?rBz{>K(Y^IYgYz-wc&q^{qP6`H@X5;xiO5Dirht+^yMr(hIXygCQ7Bt zp=ZXXEUU=Tn1MuV5HiRN8C;^qs*Q?lMJO^#&nt6T#TaxBJHjl-b`5BC5A@|TaWjz$ z=D&9=I)KG9RRChcU&OAWK;5i?#!&7`2oVLTDd0+vJFMrh%d zs9tUw2ee8ao`Fzb|7G%uj%;J|d*m`MLcfNgC!2Z*WXuV|*l7WiuWa;hi6w~gX*A5I zR712H6ate=bnePXWRfow#`EktP~~x?0~8rHDY&Y@v)k*_Rut(ZxB@|xo|Z#FhD1R{ zZXP=m5Is+*WtbVM3%dJO01NBgb%qETU6dd?|6d18!7Y|BPC$Z&P2HeKa;zjwWLi+g zd5RJT`yRFp^;B2faNDCb1sQU<89 zkRHO)sS3`olR4A@p~eJ4d11OWj8bFe8Vv^Fsh2p&W8;1UVvsqA-3i8m+Iw&wV*+q) z1nZU=8yK1cf=I#IsB1TzCu+2p{1>R259U$(EJlvp0b?oB9(er!@(r zwlIeX1;SY&%rN7(2^FmXr`)G2K|~}Q5Ka>Y;8fQTtUrO(Di9BQO9%rzMy|vc&Ol5V zNjx5wmy>15Q}Rr_i}le;|LRbFuVn$t(l(v&q%K&yS#!4Vf_ek7tW?Pp&T{gi{u65s z$dP-_^HQl@O*0jsum|9xrEFbQRo!RV4MLZD#hYTCZ7IL)jUZC@~f&$?DPofD;5G9v_l6#_0WCn2ZOEzgx1O{ab4jDG` zXIMDpZ;Ha-MF35L;( zQ48M>8BD?*>B$p9Y2oyhq79|h*+Ymh+Q}2RT4~Q-U{fV=9^b?5ge428_Ie>=pt+K5E`i|Fw&7%F#5+S56L)VEI_3c)Nd;l{9DfaZQnCppeGN4*Ap>m_4%r#=7nQ<=drHp`=oa5W z=n?2#xd~PvI@-4h*cXD?m0A!PY1M|p8_P~4Lj)?rDX6UuLK(iIGC;j1=Mx$@oJv#P zS5;vV*6`SSab67q_71=V?D!5ib#gXQ?nU63w1%dLV!*Gi?f{DUON22A;|Wek7DO$70gRquBcmf8**X_No{bZNB?2s7 z)ep`##0ee88U!Np&V$N%L1>v~L=;01w4)uo40UNMk+1gpL9q5gun5ZriXOy)2j(E# zSfhgg42CTirmAAw)e!eELIsusli@V9=r{y|gDR2ZlZT;vze1PZYF&+{z3<-fb80>a4D<)>!5(L@#6D)@?Y*Z6~7QZ`0 z>pr1s_#j)O@Xue^l7QoF2Y-+h!ertKL(!KW&;LEzvY^0hFbn6`lDeiyWTQS1_FS0g zt_-RZR6*qOKb?HnFoXas6v684RS@_?)*)2#BtrH#2?$ZoKs43W)veny!IBZb2BGVq z9Su;}dE<-@ZXzJ)3g@g};$Y0uASwzesITUq0|s#kreKE6W+qfm5{!_OlkDNVZz5~?044do?sGmxheN_R# zjN1*kX4aR6LPZF>EYyz*3cE9iZ4BED7-Ba`N)4WKTLC!Q2@Q;4lf4buQUMbMQuIp; zZocCTdx2}lDEs9o`-E}wC5rtA;Pne&Im@t7=0Z^2;PSyvK|=On?bMeFD*C@D@jFTg zO(-x9GHg_M&7FTWFlLbD=M3twHD6?9*bzZc=AGb*)13E2^I;n}oxACeS|AvmF{R*F z(B4h|9^g+x=Jin&2vMFJ#MF$&g2L$F2#w!|?S$~-nVGQ}8$(eA+h%hwuz7!}vVM%1T z3`&L(Vj(g*KUhAF*8UZu}%Y$#+?Xau}y zd?B3k8E%2BdqR^Vt~*N500hUIbA~9cxu?3lPxK@mvIhQlhE8N2c)8jb@!DYVRYS9O`_0@>$zut0IT(simp;8rIwD z0{y!qKINq)HMu{jY!6XnOpfd;1RVH@y;=eJ&7`aqnm>|EIgz=n*dfB%_- z?gUj6(smdakZqk8LY`kbzu(L4Q0xz(rHL`y&J;ZnK6H#vX-K-+A?)O#obQei2H&Y* zMV(=b-v=!PDl0!mjCtcToj`vL`_+rA8sd*z7vxsAe$a@|oWET=xlNvcfX*$bA#&3< z5!ND;J=@o5p68rfpy&`;VtYh5DM2S$`@nLB10#m=09t;lm^p~-rv)CLbAQGgvUZno z&2q`3|L4>X61$vAYD|4B<%T|9i`^eGZP|jw%?M>GG!fd`Crdl%Nwo77KAT4`kzwxHk%D|Dw&kRw}x+c70bN*Vph_P>{~03 zvk9`_Rjn>)P~j=|NZ3Ue7~Hvv{oT*?m;EDG zB?J}W;7+i^hjF7K5V(qOqaR|HAU8q@<)kn{)}nw2Jn3RH2riQ#h$ni#*0s*eyColq z>TX)6woNUMHxbNI5K18`;fwDgEswv*sc?F#WzSZ{nXM`fEtOFq!>9z9)p7-}o_FJWxC_y;~vQEEYtRX-~ zj)r}1%ugGGQo??&pdW&iXCBBJFeB>ZsDL_D+Xs$JV(?3@z`2fGdClM07h8?Wcvfb; ze>%ZsDFo3!1j10qW*H!(P9PCaRC@qv_#r-JWI^}ZmdDbB`tHQ|D*Xt`7sf3K1OCZR zn~jo;dw|>BLO}>XD9zIA{X>Wuhrt%1+A6^qz_4B0h4yGmR2R)7B=Z8^qvx9?zda{; 
z1}Qq7sN_XnKrjclNHLKDe%h=cWc}JcbWEe5zbU2}eGZgk?F&Mbnis+40ol`GcEpd2 z;=YHXWJK)ji_zOEWe-A@7&gNhs6nzkI*Pgs!#->Iq2O8KOq4H)2O0JNUy8HAC{-Ra z7g<|#kEoO3a&)wbObH^_rEEs|=+066i-QDspmLuNA=FhQYWOw==0rJ${1vs+`oHkH zl@AKrTc}(dAp=y=>r>aCI@%kcH^WeFy?+^_I-)`vbzFCQm~VcWNYwY3A>8l9;HRmL zoDmWLzN3035X&G4VZRrI7;ZvQn@R%xym&DvihsOOM#Qh*RWc{aljM_V(67&uCd7RJ z!+n4ev*<_xm`jHIg<;gOZBYAX$Sgk@)f<1yyCXz=7&@b}rhd3ih>J0*xi@Bc|FZ|N zeb8uZBMcqSQO^k|%^(Ei3VLs^c+lYg=&@O><((dpy!@ysS7cig6G2Tm&5GERWg%pw z&cASqMlNVc8h;TO#2NAr*Ue0n=>I_p`9EOBnHX3!o(1~ZeNA9!B;d;h#3lrOC5xl- zmLw_@p07k0?+2c}UvnQ-sk&hu9;*2$xV1bu%Q1v`#|rz7IFmBSr}g|3m`v zUxSWB*o>erlg%QIZrw=K=lTC_`b3nGIhetA5LW=-iE|C0ik(kHp7i&pAU;Eb1JTI~ z_Cmurp-JpZj{gUWC8GX+q7|ZZ|DTXLp@T3xFF;ErqQNnbBKj3KXQH3~zm}SaFHF}& zzy1?>Dl%Bmn))s6^FJ-qkf8mK23gvNY5)Jf9>k`Nh=1{a&#>RH;2BBog#1zMr!K#~ zsxw_IDVWe>G~7OAJk3&>*896T4!ryon%QQqu(bEs_VLWEl8jDy+5Mbw#sm8ACZ6kn zUoji(<#KU2Qz!da+AwXYgLTi7yF)Wqne&@cUx%-|$o_QI!8uKAPwy~=TwwjrW$5;{ zEf;;Hg!SKA+vz!@gD5+bPyaPPy+V@+BkACG|4yEU%s;@U zLh{c@rXqs;Gm@#0{4X^8wYl4+3qizHJa`DY|k5&0KMrXlkGTO;{pLN}#o zi%2RCGRko)Ov)b|b~}FO)AkspPRWUooChZ}f1RbT`6&hHlJ##aZXP!9EiuyLl^cI& z^7hoNoRQj&|6XJ$+@5f-9!-xj`YD{DJwY}99@dc8`{c^xhj;=C=IFI1loT~&P%~>; z#&tzx{FMFMv~&q@6WQ^wG5=3aUESh#4kP_a-VF<%X^uDzSpN)i|D`|nws5{r(SjzW zw|PAZ=A&Ko%lIU#KWkDw){^ntOb|z|bg;Xk_wbL{dLGx%FyTUlOY8o7x!zHv*kSCM ziG$eqF&Ub;XTwfPA(_gNmCeTfA$f*_DEy~;v9p0J^J^Y%%&zoDSIim0; z)2*=SX`v@Cdv{H0o#(>*#1kLOrtGg2y-2qXX;TFldK6TW_Ta08k({i40q5s!S9;|= zB+|H^4L{9uKBwnOe;tZC6TL3+8~?BeNyXfh97PA2lRm>q7iD>mW)~JrWTw_Ru#^i{ zmPRG}e9K$jxG^w8TL1Ss@PJ&7uGs$m&h<+E2LHmLcZ*&zD?EFjT%9^5<}qycHm}|2 zT+X2}IZEcnz;mb4es@O?dgvF8M?|(?`BIcFzKS|C-XKffJu%42ZH1P<)yw2Y(T2pz z#wE&i%huCTpukJrlfFsni0d<}P4rAxZ=^?dV+Y*@Z-(y72+Uel?F`V3eUPsS5zBi1hskH+R#GL4AzSG0edU!GH0%2za$ z{ov%bhjea$l-;7!%~1e}jM25`BB{%LT5FkmPc|;EbJ;&OF~j8~I{a$-;VtNgzc!%h zwDJY1k@I!X(B-d4aQxs?1^31Edip9BaOBxqsqg!ePi{a2QbT zCnq5>(Hak?ktGt3ow+~eC?!c&_QRo6T}{z|hoNw$J$Yz&H4RPb`aX^r*Uy-P;<+69;d}TH(6b; zg2K6cgpKbJj~ z=k`Lj|FkyRr57qQu(-M99E$n@^K72Cde$^Xw57aE=IAVnZmfovE|JjAIJ;#pPEjfc zsp&BYMNE&ctFrTS{paB&67kwB7vR>vKTUQ%4Y!_0NyVMXJ}pHJt6o#K=tK+(%GbzW zKCh7^3fx)%oI!mzpt#x z`-(Nl^KF*AV76$^R`RS=xYTAO@A(emYW@%mE3VJn_$Xa^3D}IXeO^~lkND@|rFCiN z_Ec8yolQqMjJ94*z0Ur?$yVgxEGnP6M$akdd{@@>(K7LtZ_`4+=0OCB64=u(otnN0 zkUXNU10Hy+%a>_g*s*h7=jHF)u9rD1rz3mQ2lq+J#vie{0Nn%mo0gDejzmc98T`B> zZ7_|8G#{M6a|c+sk^4$neXgaY%;f&e{inRT8X|^g*}BXz(XkHFy4GoVot2Jqqd!iW zWM`vlcXYlwlOxyn(E%l0z_mixL+5Q^lOTfFeU0mLuKhJ9aTYTo7WB+HGj*85*1O62 ztkJmJ>TZcd?G;>fRL6PY1#Qr%##dXJTmwg3$D|TQT*tYGcI3GSLEB;xiQ-w+kfF8|!wsF#)#V^!yFh&whn}NxzB& zU-}yUA?GaRw0!*76D|;?p#$4MAK^}7wtkllR7JgWwUgeNIB1uR%t7_gnY>+SOE>$0 z*|L`3oyMgTM&u?$x-L2ozktm9uRk`w@zT#h>g5;&S@${piT*hIqZL{qLm)kVd;*#I z-DJ_nP=k+@*T7X453vX)9h^yxFJk^)@Ip#%;^mmT=XdcBW5Dwvl;r;R+AW)|)%@42 z8#D*8FknSGc?q_39NTo@0h)ZLQoF^zc3=NzH=qQrPU(aKj~Y+M zD_uX6K}_kn4(<9-Q?HY|n~0;40797+_!+8wzpV?m7MIzDJfNi)boB-$2EFnPPupqW zD`O-lG+J(yaBwD>>pAoBf)~E*K0mhsg9`Z$1^d22q0+!pB0#37KM_2Mi@K*XE&olU zlKf?fXl+lT5S~v5P976WV2+V8CR_b~A zd06n+{=<3V9)nvSLZA8F__=7Ts{{LF9kXftxEpz8C=s7z%a*iD1u(CCI1hv6Wx(bB zU;D$#nUE1V@3iXF;ZwkK*T>4lLnDd)O%Wc9%)w(A>D<@wFUs5;O8WQaAYttmItK5& zk`B9ja=u?&1=uCKTaU!Ayy#?W`5$=VEoHrqXHTKCj}c#W-8_0*?&YOfhOnA!Pq{d+ zUsw2XkT-3kByob3SuA#cIBlseY17#SmX`H|w?K-G;K62!j#)UVIKd|oGs-Q=jX zIlbO;eew9xYjmj}k+oG=MwBhcaSj=D&>?>p6H`!eeOc%ki&m0y?3Ikq5SZ2T}~Ei<%KSt z&!I-i%+4~`wmtK^`4hBFZ*@6yWrj! 
zKyCqU6S)CCVB}3RAiJ`T8J9EidnjZ%`@KsK3JYlXf$}Od8o&U9=sP8Ggl5Th} zz~i9LoHAyzd)Mrah8NPR<@=HVEuL#~{pXBV1j}q%2sYETKl--O;o_jyVdg=%OPx;E ze`JBfp58eC{sY=H*W>sRb$I)MeAcHJC+|UP6!~q!p&NOsy#|o;(7N{K;PcCEn9{* z*O^xJ=7wGcA8)uVjd$RxGt65QDj zzcJFWNUdu0LA{RV}&*F-(6Ui@e^Ofop_2YofP0=epb7EabNU!OW#lX63uIpM3 zJ&ikY!TIg8!Q7I`Od`2rt1w(`E}c_Q6qVY%5OLHt70DO%JvC6%CDTx3XC@3z$c>ac zsGrysl^j@~}Ox^0HqDs{~Yx-S$y~A^`G)G?jn0KK3q1$aaX*Oi!X7O_Q zGO00%G36?fJYNO~Exr5tTt8fOUgx?@N*vVgwaYUC(|(>gnfZ3-;DH_>$P}{Dm$2yt zNxN_>;t3xNZ@T(pi{0oNCtyg)HO0Da$2QcAh<&hjtpK~-DCJ+WYy%Uj zUTp^6t~sE4y)HB-tUm6=RO}Yk#nQ0K8`f z2;7wK#gfMBI`VoqTRZ`IyfdjlXH$tCb73@t-3N%)6w2fx2XK-i)UklX+Onz8aN%S(59Zq&ZLB>cfv! zJ2dW>U43G^M1FksxA-*z+tK?rz_vx32Ikk^RSLJeE*wX=JzEP#GfSTzp|(}|eQmf3 z@}i;KXBV5$k1n51odgW)hnzd}A>@6~pwf?29cvf6iQXObvWKbT!G>8=DSyAEQz*-A zCd2wbmLp=#r+Zzxlr82D@)iNr+XQ?b>)DXr;?(vI_(|i|p4P~1FJ;_ceuzxe>G|^O zQy$%YKjf6(WYv^HBQs=^ocKpIKr&xk`hl8pPgZg~_~WL$ZO}V;JVvVZp6d!a6av|H zmu~r$9n|VNv9Dv^V78O{G=3=o;GPBZ((noO*Xt9jv# z{M?}z{=W(Fp-fVdPzrxT8A9y6}u{ zNcUM?zGen8POhv{$aP*}?j2@qeW;r+!x+TI_RJBFZQkhzzO78@lLJF}9&zp)yE!5{ zB5!}$IY#8Yix|lMe;V~Bjp5L*y!))Y`j@_LeuYp7+-mt| zYZ+gVSy=scIWK4j?w`lH1c1W2J()l*wrec4)Q%n2>o_o|rWZfFgU+WYsx&6$V+B=A zMAY;`9X2ck&$m`KwhFu|`MNwsX5BD{0~Fs;zPKM|gFyQ;mFG>yLlwl{yKsUu@~2g3 z<2NxGa#*}Jk2%i`@SFvvL1Sk>92`!zII}lvTbBC>KPZ~W4XUBQjk;c)d}A+Dn)#CN zYrk6FumD{R^J6EtV+gj~2fB9Zftd$+L5cn&#z|)o$}93mUaF~3Ij{`<4{KT<2dP9j zE8l9APLFWbLnl4Ixf_GGQNeaD-cRoO`@3xkYI+BmkqU>>i?K4mk6NK0Swe&FHM!dc zUb=ivty=1nvkurCdr_2HUT1es+i=yA4#5bz#7&W;nTL6-pcrZ#Y>46RwGsd^ zr}iiyT^<*pC%*TXQ{x7E_?$n4r7LZ0=x{9E>_^TUx7-n?^e-plP2UKt2O1k0QMCxZ z<#si1YnFSh7ch11i`|Kiz8`Pf0t1owaVn;fky+&L#({G6)a^TB;oToNBMn5G*3+mw0=}#b(ek@g}V}s(8WYK>m_GEI3Xu(z3#Na zJi2xK(LrD=vO?bzQulv5|2+oe5hVXW4BbY?-aRyw`KoTWrT8g2fm+vRL?bfu#!p++ zP+&ZIFp`ngo^NRew26!u9YB|DXgVkRfZNO92MX6&pH)f5(4TuxmzqRpN|dNvb}k0> z7y15`o`6)HpfCAA4nr<_5h(lD;~{Y!w63nI%1wDWt@ZXGb_xL}3-|e)V;cBz?DGnc z50KpD^r{tKgtN~Nb>V1Q+8k9=`g}`Q6e@cAy%DV=kwayU0-%H-=c&=_ljQ&o-9s%U z{YOKqzNmu{x-M@5g=e_T=ejQf)mzQh!V~qUo(O$Db~b03KZsyRa*p-) zJ!&BSRG6a>YcFZyE2(N+q4Ee-2IxU+y<35TC>sy^L2GuIkTzHYx!d&s!&h~mf55xJH$ah3MuR5uL6DKA}f2?t@y zO;y{^VX0(KtXbKoBemc0w&9=8&=B9a$XGeO4qu4fTNAfLkAUqVm<1u z6Z8nYU4g2)z7lYmujjfeIAW-K3VYfCgk5xVrhGEiU+_K;RFKITy?Hy(6 zTykg3lS2RuoWCZ2g%)@m-Bh3|_yEe>A>BKErdlcd9MjkYNQk#v|I~yqi)$(0Yt#&$ zUz1XUO0V*Hn-TA+#gccX3QsBk(S~Zcz69_}zBn;t&AZ*oOAVT~_|>fIM>aqiKIa&I zbmcqSL>GjiC<8Zv=(AOhKp(x}ml z0IN&pD((X$$kuOtLtx0KW(5jdcd*SL7aTfp56H-KqnmeB3&c74RtT~jIV4svEFxn~ z&x5+sU?oU?WL^4~J@KGqI0URSVzYacMe%^!jXb$5cSlYDB&&LGXIqN2gyb}PVukjk zf9WwOcMz&z)on53ggg6un2_3p60^_aqv{qV0}59FtRAb&`bu=Hr`;DXeiYcnh$tq? 
zvG#5r>DTf=iAkMXTQ`(FRKC@(3uPGy$~jIi20|_bcfyjzVqv9sQ>Mo(P)-gEgXAXb z+MQ{u;PgW^D}V2K{R`pGuGuhwKsJ7?E9=yb5vd5;4+f!Yh9dyeYxMxsK=*R=DV|+w z+M$_UFD||W<3Bz5rcO4_O)e_5e*0(lLE9rPvBik<+-kKjgV%7qv~gGU+sN%vZtD!(wT z0)4_+vR%A2-(-PI!%YU@@{IMg@}O1w8c;nre-_TaHn?}1x;+&EG1B?_!bu;ZZbN2& zMEbcdJr(kC5$MVt7!psKAsztx~^^) z34BN4w{H=M`{D*%uyB*N6IWZqZ*Ux!*_P?Qa9%ytiOIIA4_JD=enko=-Me}g&>-(p zRkG^dS}g(R9#rx%>BVhL?ow?{cswHWRsKx0stie;DiZfxFlX8pj)pSspl*dnczwOv zZbj;q{@xBC&}wj-{D^|BpOu?-O>l%cpikepxd4fBRV4)_OU1@LZz?~_p;47`qUyGP ziq`88P;oV%UEVKUR^(j^&1e*E$4p>XF*#hOT+;S z7`o7W4=9}4+{EF2{Y+nhy51&`HOH^Z%E&mB$lRu92q;txu%sdc!y_`1-OX-_7A?q# znDW9`0>q(eTQHEm$lZ+b<&{h1P8#xLf1^f5zN&jLRulZ&-#)qqqZr|qWt*vM& z0Du48#(nV-6x0FdLq^SKuqf7>mpvoT^3B0%a=2clozTH6rC(H!@l58!1h<9-4}JSp zb_*-7qXi9$oWnSA1}|pkv|6f#Pjv-~Hjg)YzF{t$5L8R}W2Qyj_x?6lO5=2it!=@C z6Ew0dD6viQROSt!r#pJ+_N`qHUI{_psM~1dk6OI$*XazPiTe=buBt{E1Ca9Q8@UI4 zr~rrnyDeVF>11MNMcqOK4>U5^R;w!IUrE2Z#TQFZ<@cxE^-XS5!86M7sxnKv@>u}C-v@C!!3a)0H_|!W@$L0X{bSP7RKeIGjhqR zvFRw!4KP2BfB6Vwn9QZ7r2pL6pTQ72Z|p(O7k7njqYR4`&?xge<_$1Vl>b>=QAWUb zi<}SA6tx3Cn8Zr0uL;HqtGbBO@>6%v$saK4XVaNTR@x_kp2;0b&owBm*G(6)t0Dz6 zMvsSJ8k?>N=j5Q(UI5looepvZeXAA3waI)KXw#lc60caqlrPXtX5d*#_!N{>{{D7U ziWW_shFsH`?hVA1GoT|fDp0+)6%Q9(|INVMeG(V?b-4Tdi1Ew4F(``LwBf$#WRp2w zVpz2MgGq4pT67WtNS)g=o!c=xTpPclf!q8H#Hl6kYB--Zor$5(I-ouPZA@E9!5dh$ zKW5cA8qhYTB_d>~Wk22@ERJ~%D$tmI3A_gPAe0>T$1n^xq+~v5NcL~iK~x8=R%*He z2QFjbhb#}4u}m6@f)Ah$OFpJPEtHVLc&?&!sP^w~Yp1v=(=2XQ`WL78>B?^R#;q)N(U`6J$M{T2P0#;jHA|gjRLz4LY3%uvo$rb4n3~c+#H= z2izka^Sg~c8{EOIX(-$;_X>j=jP)t*k#0w!uP;8c9%FduzMq`W;Peumm1Y2ZRq_X1 zxz23(-5peJ8>f~;h{qRPM@>n7xz`3y;TeY#3S>{|HeQQG&`InY?#DIN`z@R0YK;V3 zNCM5U92EExllq~D;+qd(=_da$P?Ow#R`@N6qw01|is-ai9%aEw+#Wk&>03t&u=1VV z@zZO6iYiS_VefCtq$FbGDEaTfVoCMwsf-xD!j&|B9olYA04$6^tIdRopONu;bm72E zB>1^5>^zSHLvlY&)7Ydj*vYup6~Jhbr^G9OT1ogii`ShxlG9AUW)6;F1zZ)iG#16G z7Loy|b5g9PO>-#sWpU$u$#SJFB|{MpSMu)~43eh9xC%Z|3AAhaB5;M&8J~AmaG_a< zR`N{Zo(Fq$6L<;{j03;!$dNETO`JA1GoI8BJ)~CP6$1A|m^ghn4Dbk(A0sGsXWQwi z{n@Lq_)JcT0RhSK&P%Xr3|jmBZr-u$(-MlT24*ni!(a*%kwLPC>pY-=iHF>E`*qXI zmmOcUHrx!VO`VCW_waHn^yBkj%*WBwu|MQp=Y-K(h*|+tP&LhV!;_%B`A;(%Y6ZRk z#;&U!o_QT#iXrENX`@NPblkb|a4M_OaIw&brlg;lSMOaxMV`!w1el4y&peMn&0L-I z`eGsa>j3cam8aHD6Y~YWSZ!mI{nDceEAq(spiX$64d10l-%RXqDOI5lOVcXE@Sf26 zQ&X!qN;JL#%f55CqF0)h5oa4OKC@wJphI4?VlF`e@U^Az()?id13%X#oob8dxd8Tu zK2i&Wh6>c31Fm8&$Ne94x#^S5Yj~w?m<&~EO{4B%dZ*rUD%;}5cqaFV_RGC$FM6;j^UQFMp zXYjQ4V<#S-JRg7zWlu{vzryldUMp`rRfFW3W^u>|Mi!*aloXc~GBY$=P&Vh;vK`-j z!lReuwqAaN-r$iCdh7vPojo0ub3h z1S>^kGdK%3B;gONuGpl6RsWM@q;!_;MnCe<{N|$&fn?cdu(My@G;VK87v;W=vW*t@oTKUNB*RmhcJo^Aq`G$v@4{ZY6`T(}#r0Vc6amceIgJO{mu zf@|z)_zvlQ`3e^4r?kfIqiB$$tkRg}9Q5qzQ}p*V@ZRRzHPfHqNU^+}=_}YTwV%qq% zJE!HlDWx7`W5X2`@43vgQx;lW*Qzr>rq zj_FV8Wrq*q1nsPjm(dX##V`>pIKdxi%`*b0xB9q1#|x$p?~f0^3U{X7Y|g?pN%N=299tSWMVRiE(1e$gobD~( z@vci_<_$ttK&&m4!;9ZjTtFU@-(iyUBxF&5hFErvggIa1PFsTcANUp?DXY>1$o~flE zp5(gS!R&6*DZw{zM8SI`g_Dvn9g-dHO|L!<;kyAgt7?W1d*YgJXk{=yo*3Eg7<0F! 
z-^GT^Re`DQGIrXe@u_Jz1{Yysl#@S+{zl0u-94!jb{K_2zg(cGLW~m&3=~g|8gOA} zYF~0CUR1Hsj9DQ;xZ+F*yktq&Ea2mrLEBsi3Ii;E^cqdK5t2MM#~=qPNyN{i*`5*f zr0H`mvNP^UQa}N%_V_dSNA7~>>rQFm3b^pf5!_}@=kfB{_2YDlQiYd~K@0bQDgGB8 zKU@#N(qlTPzv-50c#h(U3~`ZIJk6h~@NFm2pn4NeQVtEp<~N37k<-=cjGL9zEwt#O zDM0@b_Y0?E;APcQS_i&XTD9*;xXdEV-MdN|NQS4r@Yyiy(ux-ssU_2y#bb|b3TL4l zt&*htdc<8kBQJECVTHS4*iz;~Y=4Sk{#2X8KntOG&#O=r5qwt6g6HkYVp-t3I)X`F z_rq$|s;FS4WU|3}55-T*;U!?YR^^BJmh2|nVr*>Gr-dKVc|80y>$NV!3rF_7`0kNO ztUQWdMt(KgSbU`HVBl4Q_@v?nf020Esr494vQHSm)QH1$ z0jB>y;oK9rIiq+LErZ1?8xk$}Ml-OgAGo7>roWNIt%L_({+lE`ast7L@vrxAqGc~~ zp%Mu0*?5GR)V(SU$BGQILldsieUj5{YI9*0>Xn8CYOKw<_@W#6Vc-=6v1pSnAAI;y zT1BoWGSGjV`?N#N%U?5L81-No7y04zg5`%)OaI<{R(eB?UX5{@yl=fnACj#}^C>En zo_ujkU+!*$rJM73Slz{MR+W(|vhE5pMV+(U!_pPrw}H@~+xzKpbkfB0=5pT=w*I3d zQy%h26~VROme6;(b!j; ztXfQe9j-zbiztRFYOLPBGXqb9#!RGl?eHEanfIrQS$x0P?k{&epEq_&w5X!|)Oh7C zpB|35tf&+8*KfmL;7suXXDoCoz%$t)xa2e5kuMCM#9b_3+wFG6BQiGMO~!{;zhJN? zihJ$$hLuY>F1z@YjEF{HlUbS)Z5%P~sCU#@TXtCB(?-ho2=3x{cffnV+LL>Vb{%Pq zRP|#~DQ@Bl;>fKLo>19ekUds!8R0^sg8k12Hw=NMaDQungD-8`EPEB0c>B$|2OEkv za<)Ew+R~HM8P;v3@8afVq}CujvHkUvZ29=eM>goWzbS^}no>MpuNWAH%z4|Y> zYWGYeZU2$L%=Nqn`rWR~{1~<2d2UR%Rj5m&im8j~>&B6{V@-W-DuKUQf(ImZJc@Aa zmpIt>$1+L=Nv@XsgjL4X)-ZZ3XKPb&@Qw;K9A_llgA5lVMkBS z!pAlwY@%LO1jtm?p0{muPHym*YdC*M@3&+?<^V;Ykkg+6W^-fu?}2K}FM3ODZ+8Z6&w z87f@@?4FhSE>a%LQ{+VQ*GO^-ic;`1=3ZK`w6mdS{*<{Eevz@)i*r}OlWo1p_}WeM zbznNO_v=32m+jHzK?;hKuA|J1r*Q5X)pyew*G-hUjU^cxM`!Vr?bz_xMipB|swxGr z1XB!(j@dRI;6(WD80?Z!kqB@#Y}%L3L4QfZT1WbiP!KVl6NZ0GPZeP3*u_2i{H5w;3BO^}m^2*RXf0^n>THNp7|f%t&}~eZm9Qub>EY zeBs`AZOPQN9a@%ekGB65ehlT|#BJbtj?aeWB;f`|x>jn3kFd5#yqvbMS|J2A@+z#Q zoMoC6rTD5@_L?G9-sG_ytQHNo1)gTpBVF!_S~A@bNCo~jllE(zyS68#<7eCw-Gh*Z zM9NPmHyoRWZ5EELr*PlVD;TnYf<@wfGmsBqPHXWR>(3~GJy@-Mc88ZyU=0^m0g_e& z7ofTWT-By7mVLvrB)J;aRJ!=8$nsM{GstDxrIITd_(j@FY)acswhs-(0(5xFS^=c} zQBO-bG7Y6~I7)Cq^$)Bk0IX-rGCwDhdOU^xaE~5+Y&$p;9;9h{-Ir5!z^B9~5^-H` zZ7HD5u796M*4ldF`MkmSQXBK>?<1>L9aYNstux2>2!KN?U>i;G`2@;`_}r($0+q z!?aQX+LCm*)LsGP#PrP(E7A9*AmnN^%?n`F`U%gDkK2)N_+sB5y5SG8SYI&@F3i#* zMI7rMq(XPPbO*6Hhh@sEGpn$|=7jJ4IfyRpD&TLrBz!$L{5xEj%EzaEy5<5i$kn5M z=Au(Bx{b|dwZ5)fXT_E!_*>_5`Cv$nA;c3D9 z$oLq{32joAE<%o88jIwU-h`h1=TC1Sc0c2#J9TNobzC_02g@F?ENC)0vXQF-DY(r{xpW%o8#DAsw}bFhB0r*35305FLX+9>Jng&1X`z) zg{n=Dy+(;655@g~Dl{uOT&79tg8`!F7j;4-Spd26;$vH12NXGE={y<9nQ9f8DKlr>W7ioRKDGdtQR2%LyMhn zULXPRMg)0F`re6h_sAnVzDSpr3@Kw?${lifCUReH^Uvg4OyqfIO)Y^o$lhm#POXCk zeShT4)aY>J?N4Pz?$;~luD-g}QjmIZ>%>HKmE{rjWymu}g=5;J>*Kr#xBRXF^4n8C z?w_gLxh6|_LY0Z?|3svQNpBev+vbMNqh7pbzJ#RIA3#WKfo_h0j*cKci@r}h+Ajue zAbf92sC~i=sbXLy-#7%3olbU^Oug2_dLi>P9Fy#Mbbcw%C975^ZbZWM-S0qX$;(@$ z6N=BlcVdEeP06ZKZ5COzJJ78b*A)2H{^SoMgAAbNoktWA3m3{w}a8T@yBXCkTz&4_%@E9NVv;LW{NrMNn8Ll-jt zGrVhxAIlt?@=J8-=Td%I%)hS=S=ny)Ma{l5h*LI)Rj29_I}>4$!Re|CbmPA(|koOu$s zRMq|QqoKavycZ$&&n`kpteDHHdLP0fS$Mz!L^ZuFl5gv8hvL5heBIPV1A~s*wh5U? zy*?9Ja1nE`;A2*mv8QWr2Yb>YU8;#hH|qYb{N}v&E@U#!I`@JXiM8FxL=qZS-Yy4> zLIU$Q$`NqJT6SH`X|@CZjQ)5({*QZ-_PHj1q#h`9c->;=J=Zi;0@61^l$He&Ax$1tt!?aIeiK-v8$&@}Xf@zW3CF#|zX)l2>CMemM48F>DFpfK^Y573(( zccE?TjwJDOEI0xde=ubf67$>;H|b3>im?(&XnKeDbx93`zmlcpW!MdQab34 zH>$wtHOQP6b*nq6Lx>G8*mbZo%lgMB#o5S}78$hH$cZfF)W-=R^8UAt{aIazf)^HS;_C+oa7VIz>MT@AlTW}nUQpt zWj8a!#!sm>4y8?;6>qG~but85EmjQZ8Q;lQ~_k@4UD2Li-%yef+Zj9o{E?cBX$B zQcNl%7n3${g2#?+TYu4|;=likt1FM^DtrIuLzrS{vsAW`lB^$1QHZ8dgcdCr+b6g?IQ>e*=o@c*Qddp+JJW(j(=!WCI8dZ1FAH#?KBF==ZvWfM27Unw(|&EU5T#Ohu>7oGQzPPo(y2LG!iiEhMpA>-})z)qJP zP0&@Yv~m`ytW6!bU2xv#v`lvXby^0H6|HH{Uyhaus4Vpw02^v zJnt6oXf%8hAv6;-G&mzNl0{uN691fTBg6Y79tVXqu^o|N*+FwLXAq+i16l1hLXj6g zjDb62DE{l$ZZ=Pn_`(CV!yQj4DLDZslH0oBa?`)%1BrU43b^VF5! 
zPJF0Nwy{)x5Pa(AsiDeYVLUowJ?S01{fH_!;?cLH6xF3Fg;jqWxC%AzPdX({Ui;31J{`S;cbtt@3=3lnOFC~u#OFq55#zz}V8yiK z&*BwB*v)lte_-Xu&Sry=^qpyXD&+AKPMmzitFWO-5eYp-?6FP250`RCKmB8!V8ZqIo1=mc2YHZRTJ1zmxb zT^&#rItx}yKk8Coc*h*St+y@Q%`3ZPFPIH-2|c`8|Bv3xkCr=-{x~i3w$rD6NrDC3 z8PeQ%KQp-0Pt${_E@dm2yANfCCPav|=`3-IvV?m?Q2+9cYK$Hc&H31N@IwrxXmFKk z)by}$bS_fsul3x747biGKh$1=Sd#QhmVsjR2YNGugXPoK*omDGUot(B9Q>t7XTtHi zt)AHLc(in%f!j9s@K5CP<~7?5aNvWlUn0N0#@(KeIqv*a3dW zg<3t%B-LR`|?QD zZ?Lt1)%yR1Zb`dm>@6YGf2=rjW78@|O zyy;;u^Aewb|M>VRf2K1tb1b2#JTNq8FV5XKQhC6!=;yYg+xxtpkJ5Y4Yxk85lzooJ zqy&EZ5zJGSp63D^OUbf~Jv?aSeP>mxj^Ptc!Qm!n5lyKkB@BIp@V`Cs9w2wcCUY@Z zX#cOpnS02nMEWgjb2L)`46`YxO1f~!JxcTIRmsTs{U6&CjMajdS&XZ- z6z+yeN?hIyAob*a^K6ntJZv}3>#b;AXDgk|hzwtyX~KizJ*S3xbT|*OZGZb7L)vNn z9&@owt?8R{z;dU{5+j&J@Ox*Hb%!L%_K)@D*xeY_W3o%E?}5FAmso@MAN{clIMPpc zz5bzd!On<*B78`l4c;z@VxY3lW|DfQGmPh=mfFU0>yjbCthXLS_RnF=iMaXZHkLzS zEAQ?XuE9Rv!be#sDPn)Gk$&a5qbZEUHswsApOEw|h(nTFw;}mkKw!(h*pUPGFD#Bh z+b2jx$?^!* znR23XtK+gD5|llotl0LDBAobdI9`<$+dO#t>Zo0h#E~sLE06f?Oxk z$d2L;i6lUjv-8R4BiUKl~IPoPblabXsqMAA7CovHtv}ehAMPdw5p$j`z+E$W0-w|x4*`8 z-nZ|SXRJ$R+l3@MbbQlmF1v!Q4C!N-q@Vt?A2-$s!umZlbYy;lQ`ePFjMpAxYyjL{2y=Cs5nO-!g*kLOQ!*QPF{&3^5b2#*^ zKeHZ;bsEC0rDPx1@e*jY8JUX(zbgjQXlWmJ+$-ZSon&V(>bZPw|I+c%gxKlMNy{o3 z=Ht6Cvi7Vro%yTb1bDpMl*e5W zW@i4u7^bv|P|vC9sZ~y*YnC-7rS1jYd6zUxBim+}Dg6*4H84`UskxZv64c^G(z1BX zuW%WpdJqLZ{eRk6Ry_!YB>+<@3;Z0jyR;^{lQH4qo6zNU&QCj^O$J%}Am`ck>7*gM z$Wn3?9IVvyFW9(#snlrJ-ERLwPuPBmC&WJ;%c8(8*QSSIA6NduIZ!h3HkUn9AszkY zsuI?l`nEdKH_6Es?KB3ibs<$**b#9^0kmhad!t~cKhrZ^1v72cf)p$MQ+K=H;q{!0 zKUWUj_5E(;gb%PIqQ2pWKqsHzurGN)jGsZoIDanK4dgILgbyvUr%WZ{5zuMr`*y0s zdF_6LMCru)vyT659lUwnY$a9>=4Z4Xn7{HRKEs`WvL;wk@JyOv5X5+5*A-(l^;Rt= zH0M08#F|Rro*dIJ6|Fn@IOhLKpnd=M2cD0YV@4!{6Oy?{Ojf0nqUvw<(=*xyj;c32 zJdnR^Jx4KvyxxM7Gpk8BHK*qGe#5x{%C(6-UXHKq1Vm^XGzp`p1=t*>1>};`ms_P= z%{zAC|E=mm`gsV6Nf2-<5b--Uv-Wjv?{P3vnIY4Cj-3T~|IACZYOC=Zd}>E85i{l5 zGqJws_9y>Yd#Ti8Ovn8$c4nf<>IYEjm`M%VEdk7NjK0RB&Ls5&g%U-;a+W_06T!I; zEDlVMi6Fa7ortq%*ugLMuy|(Tn-7Ro4vjB6v~Qyomh0@3m{#PIGRtZ|_qjcWy9g9* zXAscZs#7pu-5Jt3ISF{a?A)_{37TJ@@aS@qLPnq8o1R)@Tlic$I*Lnr$Z;dH{;dQ6 zb;AlsSb*6PLl#ty;`6hezxF;JPDpo5=kcd;-eR7=2oQtA#)iR;{(Zrt=CjkoKP>#U z!KZs8lk@P35hC^+bKF(xer=Av8IUG|oAX2{1BgHAb=X)|HF*1eJF34j-SISN{lWZ6 z_59}l&Mh9#Ex)_MekCtjv2tnsT-c6;wwbyX65 zU@9wKbrdl+R>2o@AYTK&H>rDPXBmG7s!Zzv4#I8B#RjW9fy!rs)NWT&PFm;;@oUks zC4&Hgm!L_N_o}FQ#Mrpaafa`cr@`zf_=ne#nqN&N69AyLK{Q`<6o>9!2UVEf7kYX{ zU%1nI>RR<7|DbsB^9`uSVrKe-8uM#&L}u`<+y5D{rwHa{w7#pN^>=<$wmQ`_*gg9# zBe7l1?LFq8xwP3>fepo;nTXXJxP7%Wr5*VmpPFzls`=G6=&5w{6%gT@^vECM%IX&D z=JxXO8XCham%mkesU69n%#co2T4uln&j4RXD_eAK+r>XV&%>H2nedb<*Xg*q^v6K( zQ^517Ty)T{?oz1RaLQH%097%2DRy(Akmj4?Pk22m4yJpd_W}yb!gF^Ak#W6f>dI54GrpNZoA8RCA3T z0EAV3gk1?U;`<%UO=lm%7J@C8iG&+i*9o@)wvs8dJ{@(&^(NJD#>k1vIp-Ir=_=}} zAfP`!J~|ldm_Mo8VfLS$W}Lbh&958ISyA}J>0XM`{sbkwq@#Hg5QHtCHg-5PJskXa zdKucXvZ*=EUhLaIaev`|_P-F_11+AMhmt;y%nJ-ol-32ns~CXFr*KCIWfQU4v*NNY z8>$?PHiN7(zq!u6%6{X$|A@W+ZdYso;FA5v`RLiQ_**`S;Gc))B90n05)13^W%JI# zjc)G^s?JKHHw*eEe3%N~23)J|%gu@k|BzSL+%3u_gT$~9aQx>a860~;IumCuxGw3Q z^1Ytmo_}c8A`=U1fw%5e4jYs4tbnaIpPQYY?V%OmLc+kE4j|dR?o!$m55z`cM~RH} z{)If$TOMQXqZ^WQftNGwKmQH;h2JvI`;Z#_c!*x0h?jC63(BsI-!wHVQ3yM1IJ5pUXp{8qnU?Dc+({BEp*RTk zUlk&R5^wyQ(Dmz5ZfOFAv8iH_9nfW1kl+BBrmZ1^FfJWFfWhJdIUsvY40k+Ke-ZGy zH5;nj*efF=fq;C%>TISec*M}ygQZ9@I|2tPBaGcH+t@ZCU-ZNaq*66!N5Mb1*s6eS zKplV?!{GeB2M1{#l5=zVGRRoK`V9nH1vbo@vB8n0Ad-jU8q!_0;hbfa^MBQ_4wxmy z8d~4|j(Q5nA`o3FBO<7-&;QzLu;u_BIF0r+?>=R+UF>f-WZrz>B{XlRo6%)eRp zqyAa{7mig|ydB+F@uGlKY%-4_?l>Y8MCJFKgBa>|G?q;Q*)*Gnyj4>5yd(!H6XAsX z{e!2u{DZ{=OH5D0*6?jR&V{DtqyF8r4t|H?BxaZC!7m&-Km|q&;&4d!OH-2~!#=Eo 
zbkD!&I3iCk$C5{bcRWP8izB15}rMvxKDrE8=u7MAgS++Aez{;wEit z_B4yecgOrQtPDc%UCv7*`A6r9B~-(_0T!cS8>5D@5s`>?{0h z+jpQ*B|ETS<<`Hm!rP8l6=3%yo>r~kj)bySxV;b4Izls;wEIe7)BeY3zJ?;G1eq}O za&(XVz7S$pQqhTa$dL~#B8j+Ak4c5=OI^Hhu&R2sUCosr3Q>zRG-`BJIDcwUNBFe0Z|&*jhVa=a){ ztS+Hg%lagGZ3b(G4)g4wyCLjrA6u6B6yhTfCpomcWkZ1P)5%9zCHhY&iz3I%I+QKm zO?yTEym3B%s9VW9!f*HQq2~t(Or|ret>Xrp1_58eFS=U&eHU$=x!jXU4tV2V1A70% zTg!Mo0O|Q1EGPU;yuS$B0=uZlDj*-o}W5^fUuA+@dV;p4A9*`_t9-kMZ=oobW`LM2`IPf}fKR~#nR~f9Mm^XX{7o@5IgcgQnKAF=7dWhgJ z9rmiVbG`%P;dyb zCQip`i(fYI$KwQT_)$iykHT3L9vJF#>r(La5~zx*uObDIui@P<2J%5eF3Gx#QSif;OEcc zBg&`ajG;8!Un^HoO~Pyd{B!P8A9agcdgH`!x$Jo}@Ku*kkQh860|IQc?vq9)oGTpp zjjem`i<{U6bDR*@F9P|{51|Uqa6z|WLP&oDP-_-1Si^ao57#fIMb$2N0Oi*JMaISy zA9C395_UjfZc?Hk_0JVI&d<7ZeH4lPkn{qcs_nt~x@UDNFIq>{hB?Qew$jhjS^SqY zVu}1*_XT(8)vqU` zTsXh3dzvvU0|hL6;c^_rc@6GpNE^fwdGQTdMQgbXt2C^D^4sAhP?Vxrx2!1$MCXkH z7?rG)dt>UFFx)NW)IBDLBgY|a3MZ>A$s|Kh0mqqa;z#v#uX16V$TZ3)Ammm<{{PSd zi|BvW;3|*L1sCm|ZPvQ%ZF;EbZoQfjxw!fPB8GWcOCgwZOsRk(tBaAqGf+aTAECJq z3`GzpB>Mtlr2&QKKHiB?zEIo>9nb@)B?M~=ko$hX2?pmZcM!aoD6mI$94Uq~fcQU9DTDrR*k7H~L9Sa$VNO zqG_@lK`I&wGj$+x?ht>#%zSx3psfEvhJsinz4E9&pq!jGcPif3a3u!x-mf%U9a92n zxKe~(b^(^gBFk$6Xt`4qu4eX!g0Sd^<60RvvhrLtZfStSY)wBVDyuTe6vuO*5KMmq zUPd`KkCg9< zSh_2plGB3xfOBj}GX=|!ezeKLuMda?v@Gxfo2*yULvy@1+Wwb=$VJH+v*?qd?5qrG( zihSxrA=#x0D_}8!7z~YaMgz6s)@KR?>)G)4lO;K7u-VOyH2j}n2VCUEld*sNh+CDQ zIWltq&KkI&?i8@3Wx$dEuX_HX@M@wDNNdnFyKCmpaGH)Kv(N{&2zfxxm+>!*i3US+ zZy-I$l&9tJ6i)#!R5M!RnTVx8Psjn})&|`dP9K)?*!BHUO?W?{;Y-+1WHi2m>=V+f zBgFiUYU}@TF7H9?Gei&`dpQs3f>yd9g~vUb5Oi7sl;P&FAjKft&w9J#l^96hJ{6Sm z_O4P`&g^bDA{v}XzI^ZkdJ1*jlG%b*9vxAGw4z#eQS6EL2GteUtn7Npx#qzi-DC4Ll_JI zMp{3ZV8xISAT5;~isPF7v|%}4bSA%aaI!j34mX`%?5`t!KAeb$I`mFqFUaUM}mobraLyhE3&B@#0kK6*y3H@kU)Z65bZIxT%- zed{l&7AZpbrRAo;eJ^`OOA2^O=GpI?pZS-5@~`KX99nF#srTN=aqrPD+kac=(47gJ zp;X%AXC|MleGoOo!}Xxz-P;iH$?t?MPPUeg8~D#aYR@?V&D(UJ)c0~}P_d$)b=QmZ zx@-aUtK?CCt_X1r0yD~^3r79chDhDN4zDilzFza#WB7kUF99YCjvXXnm>eX16Hwgq z9Ch+>HX{)0h6MxNfQqv8-@-gSu_u}I=fz+ANDqin}9 zoxKL31T{OFzSXB-cc_cAE>2&qeQw4N(;6==Bu#^HwD@NtJKr@dzl-ey*A7!pzrAJS z9(P|{4+9sSNKzT-9iGBep!yB~`5tiayPTt!D1Zq2#A^mx+`Hs4Nwtz_}PfMJSb24_+HA>B1xScyUxZ3_+ zoTH>PZfYtr^vB=J_1RYA7H7q8JczZ5j(FF>WZRD3m_z(I8C`1%{vUZ>i)~r@u^ECw zIxl$e%Ya3mbz_r*NkS=~Ut_B&NN(w1W zhW%Q(r-)G)G~K`lh>6Gxhqta#F=pI}AvTC+gD~W<^0&ZJ4tj#=yw+XhrU-hZD;7P5 zR-ekmP)TuxyF-0#yQK4FVGY{YcI5`mmQT35QVd(lxKPGqqqEMnF*v2os z`u?rM6Y9dY#f;YvhD>x2>rYycQbj`o62WX|ueSZo@U5cH)rYsO0J}a9Xcbs&Srwf1 z^xkyG?1^I5rw6%~Wgj=eVzHi+1EQN?LxK_PbVfpFspy&qpqxIMnwjgK0V_5ik27S$ z0TGVMm4K#XxY5_9Eaih-0@OC>=YM#PSHOFedG$T4i$x-%14es!&}NGD5yC^_E>~AE z((8H^R0p8^xM!bcqu&cKT;%dk!5ozTa#PiZtfzU7{^B0o5s5o0z*M$_?QD22r+3vi z8a7=9+VUVFI{fE-JaHUNd{w+%MOk-s^M{SVzz8MiWfMLdLjAyORy(bp<%8in*a@SL zz*VrSD@&bmYEXn{r=t-3BTsaZ;-fjN2SE<7)t* z*`Jx~=r02iZoIi}R^8CUni#^hOmMgJFCh@&t|!DS&UL!gGI1Ni$Nh;|D2oxX2fWrq zU;se*Sk*319jmrhlH>u4k+nxhR$5lYIdyQ3;S<=smDu3ql?hZvVl_oDviiW^3x*c& zb^_?-L+YS${)wO|ubP*zb+8g9jwR@i? 
zW;v16rk->gaaiN6W#Z5qa}e*`T&XpAdk!RoEU2-keOB%`USv|;H^kKiYX%TrzWtiY zfEhpMV@O>mkaXbL7a<%GFS!FsXA7jWrR^b{qv=dTH?iOV>>ugcP$vKD$mOQItfzI- z`Li{YiRJg=YD;u?pk%O?{&68<3%mktruLi?-d8a-sYp2@?Fbjgw@t3-BWeJpT$mY2N!>4~Pfa6{^M(UETQ9Ig>;-)A7 zBxf?sN?svbcS|C@gQ-I!`CqeQyfs`6)8;0C|KRf3!;_3ADs6<$QdlzxBR z_Q?;^HkT(5{DcfgCuTI6G9TpZ%6!T1|lz3RQpdtTE>d-f}OtD_;*|0nf0f%yXRqEF|JWE z6)1fzP(LHTr(I5%=*ZEMj1hRxNW71O7(e1X|5BZX1NYb_pcv{r0?e32g2P-hYFSH1 zM4}s@L`&Ei%)Yc!66rsn0*>zrfB=PRkt|+WQ%zvF08aJ!TGfA)e#9Jii~$7{iOYIw zH4~DV;ffBzx48Jpk%^hr&PoQS&I(O;DB^f!c!{>nPKXl+yu^7g!eaC9Qu+=|0^Stf z;8(S`DoIbJMmw61JI|n}B#nGzu<;V3r55Z{-tltn^P9jL@6r?1-3_sAtq-&=iSDlI z$4{Zx?jB=lMm6gI@SiNjM;E%Dc`5W_*(&VKAlzzQUilWK#~pZ z;E7IcM*jvJay=3#hFd|;RSM5_FU&A~crZ-?{Blef1le*Am}E)5Ti7J?cIbbESp@!x zX$#Q-jBRP~8tyc&($j1M;mA+@UHcrWOq{PFgK6bjRp8|<+IYvKn)iIqC7z;#$$-jL zDeP>nir{K%Hv2nd@0h*#imrhv4J^(C6Ex%w>#!P1PSOu@FIU1$kGo4}ouOz!|73>M zjnPD?bVX*yK4t>juq)rg6lVs%=1M@_9ux_iw^G>`87$B7!xaTtJc(S5b&nod@K5zpKizDlc}2YgC%O6ZZoYFt++iFu}Hdn#ruYYjv#?b_E~A z9J7a)vi)_Ke|%~<;FSr+#`0+m>`@giQe*(Vg!%*Ltag~+G8~`EV^`k>#?CFuaTeUc ze(wid8qlcO&xlcuK@^ny|}5amOO@qYNZeny;q^U(9i{1nH)A$#!UTtj1B) zhZz=2Ut14z?Ma3JjdOr8Ki8;)vX8&hS>ps9p?*FTHhHvF4smL#GIK4lgah1dN3`6F z7lO*Pee&Zh2qE;L#zyelN}T5=IP7C)PVb+MDUtcpO}7t&Awa*G=vE|t(xsSZk033F zZtO4(&R&_3AP241NtY$TR^YO}CqOglYn+(?eh+a}?TP7Fi`A-Ug-AD^Mx9%iBFLFceGB(RhBqiG2&Rs@jm582ZGuA@@TF4dPN11Pc7>DI zAnwQCBApxCY9j7Bt&jhOr=J(H*6ljlBC=~iK13nCkH!-tvjfy9uw#7;9eh+C5(nF0 z$FkrbLEM^!QHhl4LQ)%M+fd8!BGw%6n&bmeZli-y+!`>SxgtzK@b%MaMMnp1L6L>_ zfFbgI3DI3D4q)X2ctZF|=k47pU|6}y>J!vy)Wp-`f-iJ-XH)&x^oVJ59056JF9VJE zpWi;s2n_^N6dSaKB^I(^#Gb1+gI?s#Ksg|3+IiZ>r>d*0noSN2f7tHbr|IJw)m$#K zqXrpnS)Z|IPD_BDCc`jn&<3&8rXom&rI!Z8K}&KKL*4f=#4imGB>qCY&OICEGV+Iy?v}?p`(kjm&OT>ZTIHZ>FT8>qrdvd_(}W>knDV@ad@qv< z`_h-p2mQbqVDSLfTcCOp`M(^v^UDKLU-#^;nZVdu)8u*DuWdMNRZzQKdMMshzok7T zVdl@Ue5<D}=c z@AO(`akJ1Eq`s&wnJ2O1hRVR^C8+Qq-tG!2|F(n=x?Hp&h6AGVyQwlzzDbdCUkrOJ zN%^%yj2N!$q5fu|5bMq7py|p@Ro_2-_k2MLFDJ~h16C+tQ{_jOt0^NeLK6$B#lY4 zf|n5`1zjk4{lT!wJ^UOsD1<;WXP+jf!vB3!7?}uh)Vkf~Se9Q2EH8p4h&Pl7!Huzv zr!yLz6W#1n?Kfr_6rQsztFpg6>ptN-_G#{wjK^gm!0{rWHFC4LWxWptVw3q;Io(hS zg_cFXgRJBgu!g7(?c3n>L?SyhJl*p|=9gHEo01Vl%CyMdOZ(kWd}$9UJQk_*+A`PeBhp z-sL0qB|{KqfyM-|YE-ns{1y$Wb~spl>h4|hI@Yo(dGcEViQwJ^2m8$f{Z_HaUiCjJ-?g`&eO z87+e2r+nPjP0AdOlKudvRp3YlR0LIpaT}ZWsMw$}6~3Fj5)#TT{tMgWlL2rQw*skj zL}Tpu@;Q;w-Biw7MpqaUJaN|52=)wAWE%3`;@-kEf-Bc0vXV_WCp>fj!NX zcK(Hk`BzQi^g-h*#{{euH8BpW)1!}7z6^PY`}f1Y|Fr)z7&Zu{~G z-Y^*wcM`n0*5=-Tan%IoaLuL4xz&F;oNMR8T74*8eFQQukzodOLYd0ZBosjs8`UqWOluS7glboXf>b#F+eMvS{v1%~vX3uC+Yia=`H z(feRxQ{O*2V0oY|pZ~wudIue5<+knywt!9;r4ofluDF2?=2Ulez%@NA7b+HvF?R#p zIKz4z>hzG`+9hv!3Yskwg9kT3nd=~Cr|c`GU|$=#G*VCmAC4xUvHjqHm`9c+;-Gv5 zA3eoYCqOxVmqC*<5+-CRrlnM3LpBq>umy*kDgp2wSuv^N_>|5 zPv|xL2CTvcM??|W=II-9Gqy7JUaW{~lwO%K(3;(T+&2{7pMGHk1VggSqGS>1Kp{jUIfk~aSytoweruyVpLwY zJ;HVRYq%swIDtkqzB?pCM@(H%T|;``gIvGxVsKZS`;A<2ybcFhDyVugKF|+OVRiBx zbn;tE%rpu&EvgR?^(=I@``f_ZWzQ7_?-{noJ$T23&K3Tc&A8O=s%rn#XFTl0OfBF# z`3Q9E;Q#}u&=HO7nlTCGmgvIi^s?D`HG^PPtw1c3aRI^R(kne*Vtg0awPh3?T=ys5 zfC%l3?eXGYAlhpe!FTNGyzSO3dnYzR*+Vo~07LpVIQYZMlGTyngUgAyD*!cY@+FtW z$@nZIm;bkp-k-i|>@1(Y$E}?c@0-`fEo|P=cw@0<0uOqLA(UkyZ2v+bx{JGpiDKS9 zDBQgj`<@|0NW-@D$ct9hA7Db{F8h`Mzn$WQdUgonyaHkxUL5>vaPz6rT4iV+u|MSh zMXEtCIsx!QLV4CWIW>EMBb47zWTid)H%+C-EFBN_kb>2D=P85^X3i#VhX3FMQW6@v6{zQ=uda4@ zF+S{w8TF45KZ_CSSc3^(;tJ&{D<{||27t~5rDp!z_&f=lMz~e}Om|a9xAB~NN zD`FkEfWWaiTCiIGywgGyz~vdJn!-!p956m@p9^H(HL~}yzMAm8*vti!CTde>Gk^Y8 zFd`9*D|sdjJ0X`%3$nZGLvmU#6UQGW&Wl4ORdXez(-$w6Rf$g$K>c(Uo8J%uZ|Jd8(#8N6#?t;=GAIvVq8!h2{;kb& zZNI`GG0w3)Fr7)dkSz|i=es<_i$+Mk&oel>LDjFOu4vbfCUi32j9s4o>~_^Pv@Zt& 
zE#w;#Q-1qLZ9icsc=bhk-T;Q@q^zfXuqOI~R!CforiSlg@9`k3=lV}@NogRm_G~ru zsi*0tV$6DA8Thz&dIY#!TYu1l8Ns1=dsXF^4#61r<^xMbTlx;XR;vX@>{+$iNxE%Z zmR=NVQG+-n)xFYvf=f7yNu-C4F-esA%_+pm4-Y=T=y^-^RC?liujjjGFDe;tY~DKIM*1S!!nV!sX}$?uzwd>8PzaZpz17hybi{mU-RnI_rb|~G?THv{hD3{vO8aEy4GE0k^UjDVMR!3><0qxE1@rU z;J3!KX%PAfAaphN8Cj#4fOb-wR*LB% zl15R@%2>;p?|St6hy)BA?GJFu$@w2nH1Jjqva>_?kRAj5tH))ho|Dk{6Czpg5TC0B?`i2^TEmaaH~C@T zYjEFGtnK}B`ehPu2#X;PX>-f)JdQag1y$Gcr-gZuX-3%T!_``*C#9Udg8L&&EM#E{}gV{S7-} znMzYpN*mcV(>}TovL%S;GI`C4CDtX9+qk8!05Y+NNp0uhPlZHGag^F8GDi1D3wyC? z0Upa5G@348`aq7f;P|^8OmVT#QL!LDWD+Pm7ZJTuCh_c~epO5@@5Ne%caz7sL0~Ms z?1;Ity0_*K^GT1#RpnMs_&~NB9g4bj!soFxp{!x~4Cx;l7kKZjSG;}8kR#~(_P5N5 z7d}War0?A7bQKQUVw;nN3S=H<#hX;^UIfn!{jk;3|N8mL0`KT#%`qTNyV#$ z{(U>Q$IDn~XN8?*Khlq=DIM)^E8%g*5te+-8zbD$!bNjsd({Ip*Wmnf_eTYq7hsXp zH{LeT?@F%(Hzo3RL5poQ*j>;Oo3s7KMK8LXdbK&a&<#bL7F~!3irS%o9^g9Vy`$nx z$ExeU((6!{p!0~+fI_{OeF6XEO9xGWywklcHLQS%c(S)eyz~;e`c?e1&AI-{qL75T zTYWqu27!9brLeyP93(+JHE&cKl$|ahyt2BeC_5B;xQ3J155@GH>)y*azh@f~D#6us zol@U;Ak~7ji#WoPT9j+M{MfxVL=W?(d$hdb{2(NDvr1+VJx+^Kn;Wxtqm< zWe=W~!EuI5e)T86k{=LC1?AS8xj9USes2)_?r3+%*={i{m3-ley|C{E4C}fCr>-W! z{bRvTvMRNSM~;+$`jNT|xjBpB#;d>t^$wnb53!s1{9W#iHQJVW;LC?KaE_%e9l-2+&?h)7T*^%n zPiES|mT$L-!8^#d5#3Xl>Qme@xq9q2NI`e8`0_)uo;qjr81oRv;UvsD z>Q>#OMQ{o8Tl{r7mm3gvf|qXJD*4aH#3QOxD_i^*BhG@$VPKpKK7_2VSj=VCgzQQ$ z&qbA&<$+{`qW)=KVtGT^arO$RvL{j379TBwk0ZS8vNzK-X2R}~+>`qqz8@}GFu5S% zm0;1_@Dss17-(#ZkD9L%)V+7kN}4zMcOdCkVG9itbuaGI&U@FmaXV`0<3%oO!F}Kv zjSEGr`>WyVbm;1pFgxF6Yc8HypS&#LRrvE9DC&I{jP5)S6Yg8Z?Fw^5FV{Smi#}`w zXtS8na$JBT;JZ!~wJf3THg4!U3p3qjITtOvsq`|;alxQ>3G<3+SJ`cyEeb(>dEKwA zj9@!*qwbwg@8=`bo3xYL8?k+{CJ+*@HxRCbh^10t;Pc`aaxFulS^b3?dlGz=R}rPu z1{*atoMCW&&Xvgye<$NEWOKsy%i|LZcK1UE^zF=5M|=ypZ{%;>T);Peb;mx~#Ifs7 zz7zwiPni}Y@=!Lw%ZuMio*5ykZ_6PIr4=iX%m0EFeUn=@x}{Z#&%b3H6vp;r!D#Uv z<-Zm9Rh_V)J*CANEc2NTCNU6>jra#X3U0!fV3!p zcX7l&R2GVL*_26^n{Zu)s5kBE(1?bssO8%&=5R)rprD;0z$?Df0(V)>!+Er8I}dU1 zP*{Fw`_xkh_*!A=ZZNh}+2((gdEn2=T3nurEwbM@ddlCiDwQiAi3||$`3KEn-%m(z z7?_7#jqw z!-M9MY|FXdyR-ZJPd#g?W z-60=|lhc<2sx{z}jdZq?6ss1{O1^zVZ+9cy9g-hOh3P(1yo$mh0oK=XUh^2{#OtTi zu{EYE_>UIMgG0Pm&cH}-?G-v!VZKordm%QNE1-H0PZ;mkht6ZG_*s|6r_Q^rp!KRo z>oowbLQgph-&%bOW#3+Zc2uM8MRjks(#w<0VReQ+1^!SdN8{baj>^v1Lm#~l^OG6O zQ5i5WS(2$FMlT}kIY5s?P9&i1IRlhpp$P5ZDg%&{2Q4Q}Q3mlxU~J9vvl7T6rgvv8 z*yy~W4bI=z_gok9l`O4>xZaJm(0{0@fJhrQv{iLxJ*{@w7DWptG8=rzXg!1`b0K0Lm=Wx40kk;-?cueQ@ehqZ zH`G3lBpM+8McX6R=kAHv%CT>TOMMcG;08OP!UcRfwljZ!mjyKx=4`Pp8(?ESoCPxe zP-P$vNb;;H8NP&g@C;Uw|GU+vHx^BXJj9eXA}nZ-sQAYSb-Szbpwl&O^KDCdjN6Ki zIBbLK@?7w@B>PYLvC}{%F7a1meV7UF<%(c$i8t4S_2;eh$_#J+4WzVQ4B&VU>lmOp zN})a@$4SFF5Cp6jx~(#hDmkIC``W01d6#-z>EUh+!p!?!Ah+SdEj&WoZ+IpMQ0F zC+1hnrW&6hs<_mJfNCSqR|{eIo-o5A>rPn1hgb%bkOJYmU_?vmr7@&_5sGl&Vm*d! 
zcEKA1K03CpeHh-p4Pb5Zn$oL0QHZ?q*C%?RbPTQ#16*ttE9!s0kejSAv4>%{I~4hz zJF>%}BU}hJ0)UyO(;r1D+%O+rBPIu1)Thc?*cx#si-yY+j|7hV)eR2CKY!FpGegoC zLihPh8PBDeU`nAG&U8zZWBN%y2(Cy0R8`_{Swgz;Av4Vwdw?z#Dhk@Y``;Y(d~nG0 zL#|OiA}6nIYt?3N@4CH^HzmAo5nMF&r7tfP%gE?`$a<39$=#j>qGgF7A`kE`9f&QY za%B~_^)V1X^#O+#rDb!`>BsXK=pJl&J7OZLS9qb!=fz4dhhnLeo%y%HJ8=*g?T^%k z4t&}NP4D+&-%YFX^)7O^W7vWZM`r|LABM^A_>?2hd3dbz+?6n}Rr>2O2YlL>nSrU6 zl2nbDm-bPdx%KtfcJpiIp$z^EcsbGw4M^`jktApYezC7U#7`V!#bE4ZF5LMVfM1=b zxnKF`>Z(;3kUTq~_9H|)^i|#kNC;CK?2L`W6LL;=OPh1x`;e*5P*@WsgUo%Z3{e@| zHl4nd_BD$*qsC*^Yix|cd2jv1+1j;D)29T8368+0uT#~-7r@C*TAe|h7_)1m#^%g5 zOis5YfTx?8X5*t)3==G+ucecsZ!Ue=%iPWN;`A+e(gSa$m$yWL1|l!sGitO3#=w%G z;H}rh0IA=xf=z$@X3$T6XA@F;?$^SZD`zj4I)6R8ily~!sg;w@DJY~IO}H>U4Hi?s zy>WE+R%Qb1gDjB@1;yZpv-S=UsVio4h(6dxI=`|In3{$-Wb{WVFKAc~=6UUf0fKw{ z`23SD^AyM^zC1bn2f#mPV(3<-^j}b+clTX{V@zeQc1o-bcLD1E3ySCxptw{&2R>Z^ zH;naGD>0$6LsJtefh7!~!|=7?cp$5*^ORn;1=!6SQZ-xwuWb+A zSdReM{Ylephs_M)XsUu&CNTLOuS7pP`m9KCyXBsy*E;xUA#Z*t335tP1%Dsj0wut- zo4B?F2OUc3AVY*|rON=F2Mh{jWCx!&g@brno1r{SeUc1r-VY00$;a7rH4JeSY;Vex zcr3|OI~y={Uth$w_BYU=4pE!}Mjsao48pGLVk@On)N%^sAYY!zNBE2n_FlQkXbF-; z92x2pJFxE`rqyq8{Dkf3y9|Z10at;-@-Au_7lR|%Vxz7$%*kdzdjvwy-#&q*KC9CH zJwU}mFrmY7%hwJWzISbiFbBAih^JVCa$E>v?Zo3tfKg@yhQ5m`E7)k#b$c;yeyXoj zZXqAsC9>WANGZHv2vj=Cw5u4p%?-M6wio8@n8Xf}zNWpyK{FaYlaehWfLfd>n@19_ zsf>--42J{b zB_)doJ&=v?dn)pv-Z!M-?Qy&H&A9QTOQuodn9ix67o}C6g5=lt(<29;9iupYBkAiO z%13sQcO>Gs6lo;K#aX<-SXey$&YzXP4g2VZ-lE0;UX&6=Dil!cwJVQBD4Y|9+R=RD z(IpvbXPZ}xe*R}m9){kCUn6qFApZ3mR*)Gq3_Es})6hU=N>s^ll@a&n{+~(zYjmRsJ2^ zZqX&?W3YW7{01$?Lv{Rc|+Tn`b%4f{TEeU0zC3!na58YtJAd8~`dl&SomZ^gR=)h8&K;#zvqY2{0GR3lbi^JgGtdv7 zS%sCkl5&8Uz$k+qlLwW7ueWSmhB(2LY|G$-e4I_NV)Y|ar<-~QTh9I}T_gJWIb` z`%hrZ>KD}<{S}ybt<<=v{tq0HA7A@0B1c2Z)9!Ua40kFZiA+&JKlK6!MhbQ(Om90UMgYDv zGG#<%*OtqnG&i*FiK_{&*VIwpf|JMuf8YmbW68glYd-s>X*447+nFyt*oe@V2ls>*v#R*MGm#Yqw*(JDeg{ zWM2LW?K=W_=-0Qf z$-Gj|Y3tI6x0kAX)(hkxNz~{ zSqM=#!r{7*&D?-}>0&T)$tcwC`xSDW#Wp9J0EIMfpt^rR;M!9KHy3)tn2(M%GYZ`x z(|qFzM{39?G8V+?0BIa_isd->-Q@u98p=p^y=F2Eq92s&Fk-Yr`e6aJG${F%Nd{&H zkWksVIJY$6R%rXv8KoVsU89vC0Ib5ZW36bF_E<35!Y+^K zO9TPtpYIKL%5H5+oGARJ)w_FAigsH44R>P!D-FwRA=d@)YvGw2R_E>`{q#C4)}z-% z7LG4a;Zh<~7m5Q{PQF3$Q1D5Wb&GBkZTHHYc`{R>**rNp63l+cWkOb(_@q80z6R3()fXLY;HnmG98s)QsyCz zlzHxSE206FBtjYpMJZ)IH$oH|DVa(rW0@n~&wieB-0$z*e-zGn_OtieYxu0S_g-r^ z^kBNna#4!YIB=Y=GV^3Z+WD;dyrCM;zIVc#j|Hs`aN<{5sMs{dZ=1#MG5hosUryk^ z&g!1vw{lhdYRhf%4=&kvE?Xbzp`sFCz4`K~cHgLk`;E#yHycE!lpkj*;!tW~C&y4D z_mhiPCYibq9NOKHcP-)KqlmX`0oFEk=xKL8m)+#96gy=U#9L6_>fhFu?D8Ha| zI^R*;=;jzpAnrv`4F~7)ag1lnP>%16zOCDxH@7#}&*h%3tJH#V&3szZiH-h|azPp2 zoVQ$ZeD`Ma(5iZeyN=whv#!5Q4Z;;@LY1K${!VGBXD*I7(#%ntLHAB^Rr8k1=g1iu@cO*Fo}V4RWiTPhmVInuZrpTB zFC2Z|xg6qfE{v%K)3dv~98Rc+5)ck;939*?Q(o5D^#1oS4MKyx75#B?)tbeG?FKI7&@ zM=cM@;BNi%Nj{1?BK-n>xQWsE<=L_W;%T!$a<;Be-;cJz1e48}18JQLK8R6kzhxvB zhYKklL^w3|*X{k8H$((Rxw}>*e)^-WsxgF-AXH!5p(a`mqU(v1UtkPPzcPb-vN7eIdV4cqzL7;y9IX-~HRns44f^af5nKu!OcB zjlP{0Qrz@-45h!quf?h&A8T$$k39H#j+~tw75V<&7@cR)vlX5ld{z>4(8fU`#;Any zJ_(tnviy!-U+LY8i?~?PW+(H|!IZ|)6%9O(KEw4i6p(abWo|o89O)k*L}!Yhu_a&0 zP-=-@6WCKG33NY8i+Fk>^hLu5O0!PAmGJiCQ!VdJE46TD0X@qlng1`%9|oq)it@<7 zVOzOH6D*t$g6eN|f#-agl2lZuiQg&e#lS0?+^QN=(k}D$3;S)sN+txRRh??X;U0=* z5Ivai*?(CW*96zCJ0e4!DGv2I$aFZmj0`cb*I}zWK6r{icmH|o-DhLlhH{1brD(Pj zNPO{?isX`jlgNw3cHf%!d#`zDo?>=5&y#I($*6Tsgmheq zQxRo9G1?3rX#G^U<^>KF337F+`~8z+bp8@5|ElVDLIA@R3LKjg5wRb*Gp{VxzuuFa zK4&>rSQ1{|1R9fe)Rib9yS8QYiSJ>U>I5>OFKt zSvLz0JJdUC4q_h~J?7P$=XM*WPik4sGhJP3*C*i+iq6hmVw{M?`P3ObB+%5+;1Zx= z`)qdCicQq5nB?MpR&1#tttm&Igz~vBIH`uQs?=wsO{k*v=LGlZA99k5iK|REJFK{9 
z=;ppMPK3%!F_ygQx~gj2*%B3DL|XFId`c|>G-!ot_HWA;8>1aHjs%Q!-aYGS>pa?y zYj%_lzkJ^B!S5Xz+&(qleqT|H(~GglK;&>UxwyX1AprxQiJ5&t{V@7lWyciwL{oF* z!Yl`q#{9eWwdC-xVHs!)W{1)(3_)x9UH>BpJ z8{6(9rhk18zq@Ii(?+DeIGPPttb}VhD7MLwylch5*_kOgO>LKU34ElaHtEeQzs(x) zST7l?{#hLgK87pnc$^v#n5KTJ4VgXT-4{p=)$nz?AR`lsPEbFU=#?*e4FzGBdWkxt zZywr`Vk#SwZON|rvUEEc&BZDAz4|T{t|I=r>_DZ_H?1W2Nle@JhPKc-a?_PLi3wQ! z;twlUW5029q)CzXLL*6Qd!?T3$@n?ReJ*S@C2{HKpUjC>SP6=qZ*qE>&>IysF7wx^ zPd!p`sx6-3rJi`@U)5zSS*pszm;6A-P?6V?QvElfZL_s`sF(jKu1tA{!LBvG6XTTpNOY7+I|Sx>7=rhk zF`XKJPyDrdEIUG2*O+%eW{%Z4_ek_dWHB&L~PhgO$iT>(- zY@$~TRSEU)&Sy`5fY20h7Kkh9@`z$6f8L{~j+IaemClS>u|QDPyV}ZK+1u z;OLewP0H;vOuxcofrzEVkUdN;zAeFm><=;*bwoH#9LZ0+rjs+S+$zzCJXK8YArf}x;js4x4t zBDKvWm^xFm=bt1Pf`c=t=rH(>BSZUwqtQU-Y56;g~3yQl}t{i2o5KC8R*4bSsu7nM1{l z;(=OvT~IC{K1qhkHKM}3##f}$lEfJ8qF1Z=*{LgZQQ-kvy(xFm{D;nxS|ihsxG9W9 z=ib+e80jp~>NEU!$_0sF>Bcz@%Xy~wQF8H~acV(teQXKLE#xOjD`-1{#E)2q$-6fP z->&D3+~y;a*xBZ*LPp54?a2GoW4HpzTj^Fe0u4K{N*^NxTN)pazC$nu(aAHpol&ff zace}9LyJlkbsUbZ>?e29`i0|KzSH0;AoDMxiALAsxHZM6V{(66lN?n>v*D9m^ipurI~(V-fqPw(3=7>8rz)b|L#omJfkKi5Xe?)N&H1{{==@hLn8<1dRrPnG||@kg7uJCf^Hn`_PN<2RZ#N~ z5Gr>A6}}=W>ckl2Sm25?6&wo{w$m?ZsJU@D9#NY^%h@^a_QZ!1T2}MeZR0%X>pMP% zACx(7Bim;Im9krQQB=(ZTG!~1j8)qtT{j5GbTuGO~$$XtNKgwme`qrMiwsTW$*O$=#NB!>{9VQ9)RzgD>4_);Y^y zGcZg^^m{xY=C;g3@|KbRp&Qv;y{J?0LA>t*e|`gyXu6kK-A3$LueDV>)XWk8vQ?fF{jqaJw;5G_k0%_G8>_R_Z9jWt(xsk=&R73L(ywzb=`hq?_{L~CfBTCq>111l;^3rJDX(eVw6 zx(@wbHuJdApWul4kFRp%{8xE)yr<3-C=XTn)DL*|%M#a z3$1zNs;qNG{#gb|(r3n_@G4JjkqVtrB8Pw4f|;IqaWuC?Icfc0{Z2lHr3;V;;>`In zxO4%(=RoU_rn}BObmG*94=*jq9jQp%Q=TsE{=V8k;*ZK)pjO0 z;EfG$;_1P+a#BAH-T+zK!lxl#08$!i9nxBgbm;tvm=a1r7KwoP%KlG z3El-{rOnJ=yJn^=VIR`oHfYM_1X$G17&#9$og!7Tn{j|6!-JpD|uriO{x9Ku;a__G^cyY^Y` z-9k&0Zr8B{!G{1GHV&gBqTw& zm$X~*b-dhfC1n;v1MqmFz?vwp2u7+c| z!IjMw+yMr$I-^eMtRLmrvBIqGVM5J$yB<}`9iBr(Q29R^JS0(otd>N_1p?tXq$e7h z8Qazik!;B0QNiHo+*Kd$kQLg2!?h%vQ`MG`0I%CY^cO&z zS<-+^|;YWkpgAPN)KNfee?e1UN(*TxnBl6&x6scnh~j5oTN@z z`IKpo;m0DTA1XiTf1-EVP1Lz^!Ux`?TJNu3F?S+N?4?sc&9rs9Rmo~j@NyWHNb008 zA}7^G-?oes`(J2N#EVGLZ{oy5={`%yE7?XrKWp6gd&GYuP5J}DGXe#=yLj=Nsd*py zC}mzuflP!mNEH69xc!;+d{5^*^JD;OQ;r-qmFUKnn0iE28PGaz~4iXuap(oyGS$&r*?GJPn1`(oL2o0WT9BCl#dtc#MFY;bgz(34? zJ}O{8S*z08;d&ucIM{4_m2?uVCe|*4UgAy63dT&H6sKQ{{QeV&Uq`%pipO8Uf!Skf zNFqX(Dd7e~|FGzH_)To-Bm(8P{6&w2Vy#9fHP-Pxz~neFYc&}ilN_uB7?tAUJ*4h-5j#97P7T?jcx<9-t8BWhe|RPjs| zoba$4z6dc9RB%{qo2zBSX_q+Iw&1G>;M8Aa#zehV(`@&|OJRtFx@(elgN_3WB|m^Mx9M!>K2Tzkn4>&U_reFRq)?Rf1<;6pae0H>A;%Ms0uCjD?y%c^n2#bND2 zcsXwr7xph1UAh-~|0I2$dcZ?X0S=H&If(lTjUYCy(>Hm_ofM#n_a zOC^YrU>LVp(j*p}J|TKpJEW~ndsM^^`pxw`erfL6z8OXDzVk(ns5XuibFP=eoAouD1_e4i=Gd)HLb$wmX1*_-MLIm=6f2m1we7q zA>Wf=ofL>^m<*O*L4y6HN!)zvbBzg$aL!CxBNeAit$lvf1sqe%)x=Qt zPYYqm7QQ{YmVAG@{-f$npEnvwdx`lxW730&*jKtM91UIuI~;d~{Q#`vUh{PmX8SaE zDk3_(jSUKr@BV};{Q#RJVlF5%;A#JqY4zXYQl!Wxz4Xrqm;qP!a4LoR_W89j0V|7B zyNHcEf7p)LKC!k^d@D!*5z=abEllv08t>O22YUJ9U>y{SM#%` zt*bh+P*^P}JRO4Bt7{TvcXW&1 zADIch>fgoFa65%R^2ZuHs@6KJEju~XKcF5424Qak5)Mr^^S!3w>yK4eTpngQ3V_z% zAyL)01kAWEs&8^o``Qfe>;51H!(b>Kdq54fh(#0Fv>X%~1YK%2Z=Ep^xXhHuQ z#f#?09`#yziuhR;@BBID73j`eUXUwIJ z%_tRG2|UT^<86Q}geAYvFB%csmIe`TTP^cjASoMHB-Nsn>xYk&@^93D*7DBQx&S|8_s zNK#ZyPqqHf0UJuXX)cR_9N4lOmDu$$$AY=4L{XafwBdS@uQ`iWR=0E$oah0ZT1M*; z)`ktx$NLuWi6+*wx>9_$TkULrw&w*UV}V>G$QSa--YH{!e=G*kS>Si_CE+8|*h=_B z)x-P#6t=tCL`vI%^4>~t<2Fk=suFyYh^y0FY_P<(T|+!w=`Rt02F-MOlTLRdQ7>2O zzlm7Ro{EQR%Kw)Z^BxdRGa?wZF7zF=%eRtFNo;vj8jR552me{KPBc$FV&vbZ7ohq? zsN=cWIM(EeU>o+xPQckDWA_2;-6DY)r^780m&fQyvlx>m-nWxKf8TsQ1%Rpl0x{G! 
zo~)JC>nveSJsYsf)8TvG(KA^@=jjL&Q+Gt_!*rrrRTZzR#S=||CUFAcvV}ni*qkYR zn7pa!Rp%QzW*}7IcE1g}M484=Y{_cuN-sCU{ckl*n^eHU^G1EL-3~UCm=iK2Q=Q9b zCFU2VeaZy~*OAz?17RE<7COfvp7fSH$Jz-Bces0bbs56&f4C1fSdth?S)`HyLLKEThE(Bq_gf9;A$b=ID28>)dU++@)5|dZtQzv9|(SE z;z>u-wawJ6FWoV(tiEu5RVWIPoh$>K16YlY>bzjcgkcM5Flhu_yo4vq{InnN&0~)l zia+Y?^iw%SL!T8~v|X1jFGCqv1=l2EOCBSs(-sn$7?qR8fSOgaAm# zF^E<1O@59E4t;(sse<<)WAOX_v+QuC%>2W7srl#b?|1``K*mQhS_K%eIZ>ZE-^tYS{foxd zIVoY;N`QP;m?A&#Pt&Pj8}Bfjtw0BKt3jNB-3$S_i_OH-ZF`_Nk064@? zw9X&cW7?vTIZ-rX4}a9ZqU+gDh3tfmkO@R`0j^6hd!(#pw#?)64wL4%y7frf-I`mv z`}RLcl-<~;MxwT3qf`HBNvd;kwQ!}>71h}W2kiZ>{VSYvv!Ks4Lw%#bzYm8$bnUz@ z=H6M;*tAnRS9c{R@!{ez61RHSN4g#`@g5l5UwQUgm4E8C*wYg~vu{KY+P0RshMReH z^;cb;xDHd8zB14lwv%qHuMvm3C8@ej>(Ys)KyB5pA@k3n2Fl?MYvSWoFa-pVyVX*f zPtm6B*VC)L9nZEYi3t3DoorQ;CSvDyY!DY zbmM3}>T4^gClzTgi*@q1{dzu+WHv>JkzK)Ln^4>dc{Y0=Fort*`SM${hGhW*8Y^3E zJ=^jsY75KM56vlVzb@DxtwL5Hr&9iHL_t``hAU*Js_Y5&dFfVpGZbgyRyYKDOAC}O z4a23qC&q@g1SA|vrT5Au5@0!#6J>wxm`V-1s}1ZO-&%`u^dlaKW zIRvFHs%$+=K&--`o6d8#*U0k?Np>ywy`tP$kU6o=8?vigR(s_;4IMQ8jg|fNfJ-W! z3PQ(vV0uC-A+X%VU!u`(cLB5VN=vPIjQ-zW--L7rZDTE}1TW{FA6WWhWKHoLD|stl zhcK#OahmEZzD}PVgj!P$hkf7rr6g!xNjH9*qb}vK8-hVviZ=X^i;kbCAcO75g5k(> zUlrk*H>pt6eq;K$n9zI3ixD!WaB}xgfBk&%6A{_ts-7nCZdI2?vU*Q^A2E$oy&!8X zpM8ib1;AwIT{i3~Rz~EuZbD;~|1Pe=b3W)5op?x+?~TDGj`*XA-4|?dKGOio%=Mcp zqr=_jU-6?f*6ez(dThU4x=&r5vwB-?oZal9g^yGx-Fq@Jsip0>2&JG3i-t&U<`$$l zx>Ho5CU14Y5bwx&qm`84)45TWD}*V6Z5@k7w&6@@&%jM_(1{z;erogysSrPp5#vR}A_r0wd*92Amsw$FiqrBZ$K( zW%9xZ8D!qNCF?ksGBfrHx%PYumF!yjNj~>b(=YSk_9J1f=bzNp9G?D*ue{Bp^X^~Z z+Z!o_`T8&PcCF&U>+;)eDpvAeN<ji4Mn;iymr> ziLq{bnC*JvyDRDo9hCxqb;^B;m}R}-=eNSE#5`McYAq#6!s#cd%9FJ891P65d?NR( z_VlkI&!19SK^T24ck~|-{zxwUg{}5TL6}#`;5Oy#UZ=9bklF|QDD|*JhUtjPxMJ9| zQW!BcQi z;6SoUcoC=eCCPnixa&O$RdcIzvFE}(cmB}BJ1y9ynfRsDH(eYEN@&52VUY_YMa14 zrYCCUCBvRSMLmJk-EeT#l@oN)q|eWrQixV)<`$!)veNF3L2UC3xjD*-u{F#Nklhee z(w&p95s&T#9SD>mEk$rM5xprYmrmR%T6nBK2G=(MU8r)VRvUZnUX-{Hzj6kH9U{(< zMOsOB+SIgqQ>Hl;E-F4)@eqtUbF|NWfT&A`DOR`HwE?w7dX0nM4F4VARJ=Q3Lsimc80|o8+g>e741_DLc z4kZbmQ&j>Kjt8ah!lZ?Zjdzez0CZztFfGt1BDXll(xPX+DOE6yL8B`dg;Z zW1JqHiO3#RKA%n@#lGPcjByw z$&4kRzqFBvBCCLESDdCLkhjU)-ZDebo8Eko$wL8SsJ&>|vtXZ;xt^6)!RNam!HZGi zIm;&g==D%~f?3+m{g)Mrj8k^xO0X$ocqq1M%9b&>#&(s6XQv~~5;BS|_}SK>RDo5{ zar|G_cRq|IG`L`Dt*lF@W!m4-cM)8DpR1Fru~U3~zk*3*5yo{Ru&er*S4|@5&uJ-2 z_^rHerg_f!^fMmY!P2Dg2h*AjcD+vwIW!5Es5dD}ZVh?T@x6vD`eV8(^7TiEuj5V}(BvX1w(kMvBAvjjLeDny-sg`I~da%2^uYCiSL>By^}y*>7tS9~0vF zFj-$EG5`S7o!(3t*p0ae_jnr-Mz((}k{NKo;ZAhE_qlXKyv6f*UNSvXQ2icBHFS({ zwy+sl7x~nL91@r3_4ufF?pZmi^&NJ{DdF=GPI#_2FpXcA?A=bS=qzIR~y4or5X6AdVs?#e@x5>$^pH6VAp#|YB7nWe1cZK2U>86Av{ zEh@;sj{bd3+Rb+&0^Mhl5<0A}C|&CB^?7+(4#FKk%m--<;{mV?Z1KyR+|i^aT(blB zebf5PUx3we7y&^sH_rX7^YMoP$TBqu#lamPv$(E}NdSO6(%rWKKl$Ul1oC1Y-TrB; z=LThnuq^n=hp7t3#)@Pv$4anCrYV?yQ$~0{otEkV+>*KsUNBPqk9q`6M*>ZU)LT3? 
zLw=+R27>C$VT>p}y6Z5z*v(dR>A)s4fsV>Tb~ z*z6qzk5qe(pG!9YyF}ggDGIOUiW_zP1#}n}IeJW*!|g=Kr&~#ugvx<@Y<;P*Ppo!W%4u>}40;(_L->m{A|4$Z zf-1WCf-e~Gcg&*H`$VI|2-YuG<@T2mhFq1UcmvCl^h)(d5ffQ_^_cgdQsX8l!2am} zPdwfM&#zhe<mxD16<4rYF)pncbIBO-<7Q*S{l7$c;$-=H^1vPVby-(MLo$m?h$Z zKI=XLWQ-iVQB!qwo6-|7jN*2p+ep(X#9(B;(E@l8Du*R&1Q=k0R|GNZ+y53G$MsZi ze81@d9uU&=x^&>B@*SV}lcxcxgkF2>Xb|Vgt}s!17oH!Uus_gyroS~EKO*lUP%${L?c6pc2_e)}*kFz;YtYq%}jg_8oh=9V!YwUZHY#l&wFqQlBl` z5JuL4o6~dXyBR-QZwpz6xl)aPK?+K}t#8yY?4h<@Dhd-P&G`szyw|t4wLRuy3c7SM z)yz#6JpAYi6(ddvB9nifY`{8OWo~znBC7Z~;anVP5g;0l=Q`^cftoYYr=i-0(*)0Z z#8$%k{?{D}iRmoV1H1(s(5Zdy(Lxs{!oVN*-p~XWyZ&2L6+&nyDJ-&rYl1R}$(3zO zJ)mdX%oP6wQbk0Zt;N6t1rV zi*DC}TztEq>WZ+PUtW`*Q&O%@1D~^HYsg89W^R7a;U8oPA~Uy7#z_>i5>!lb&Yt)R zUtb^bF31ADDz+UXrW+STt-r7IJ#-Y?v1l;dOoVUfJz!MnOBvG1HED-3qadO!SYx@$ zy)3nDb#^VPb4b18qhkC^BQPquU#!>s#2*nPRX72+*~q3G90|vXy2Z`OpZiIdDxX1g z>Cqgp36*>}b9;yKopT4KUwSmLCnx{&T^v3O>Nn+Tlk&&3*JU5*Ew{#D_rdYOSI+iI zAvi+mo?xKBaX9-iDosR&t))H_##>b>k8)v9$jJ9&Z4W(YQ8pycg=Wn=0rYs_;?JSL zET1_SNRE0Uum+Y)<|QzcSt3<_{Z5gl&_qO2qT(__h+v$AhqDz4yqD(q%;M6sw=U~{ zA`^wXi13~~w8irV;UBlC#0fYm$rao<^$FBUYx0KD9U!oU2nk~(p(Igm{^s05lXF9* z$e+>vKbC_K&fuRvKoer4RhmYtrfq|>u~Ov_gEV~9x50v-0wXaFcg}_9KS3I_!p}JJ zz!3)0W}0bnf~PpC4{%XAOy@Wi)jGG)07Nn1Rszgc)>DJxRklN-UhX;jr zA?Sc21<>_dUlv|H`}*toN3gzoxB^9Y?R=PI=e`gD=QKr?X3Uofzsb(+Xx~R~#U(jrCRKN}qE=_5yj5{}Y$0SM^;(24cEwAW_r7 zGZ`a1{2gjg?>b-vfeBHaF*w&u^`8LBR_Y>SJ%X_&h6w(}hm>YK-O7>Ly_hffO7Fl@ z@fjUKlk-^ zAK|I@IOZP!68y*WganS8CS<&wGH`%kblpJPdT6j0Ov)06%u7_v4q{>IvZkO%u`pn+ zeUsm6Db~|e4XOPXH@vKUH}EJRN5gcI3|4kk9Bq=htHs5uPY6ZdadmxVnA)7#PnvEM z(Wadot$kB>6m0o&8~ndsY9*5BYBR_WpSR|5qnYh1F9{#+$ow#qwIr-9TSFPNc3 zKGQibK;6?s=($zQ9T1EtPbYS`cR$%?qnsBe{%}FP5XchIO;!yj*5d3GDceOiZmfkw zD)UOyJ1|JVL}CY7hK!x|u7aa-UHt%J6JWHAv~1(XwZuqQ1`%+)i$KK{e~!G2L-_!~ z)9}bG$yq_tQb>gql)Xq%uWA~3+A)*UG!q*19HUEWgAE_!r$LmI#N@Z*>k?^zkFXNP zFV)OERew9(h5Y&>*D&1|u_2z1Lk_*A3{=8&%&Fm~xmp!TWRxT)DZb)edk>@RQe2FA zLnWa7UIgWseC5XgZvBc~mb<7Iyxkh~B| zb*>2c|eJbG(kP<`Cb9ZoM|N;(~^;?e}?1cL1akN3z<-oh0z!9uBF1rb&JLmi8cY&aOTbq=|;V|6ciHAi)clxwk70H z0fl%%%sh?`fEZP`ovUW74^5zY?|>cHv$5%j=3Qb$ve|FU>P8ZBq7T8bu86{i|3y9} zgagFaCE=qu<)Nj0rUJLVK|v6NF8LR;2EMJ(huH%NsuI85tg2VzZJrTX&4mXX7SnaK zKKUO;(&IT@D;oD;q(-PC!?emvhBQWbK(#Mix~`_i$tP)yj$`y*jMrj_=5vk+I3>m0 z_9ZbF2+XP!f`>4u&F~E8`1iODmrwD?G;zqd-g`*oA{068D3H#vsZrs-Y@P26bKmU( ztQo2OnO<4h1z}C_u5^n{ha%i$vBdrjMr4tw90j8Cq%KpH}`C? zY_?=wQOnghpNBe(7@s_eKc=4xFWh478xU4o2zUt>9mNy<30kMyV6dUfU{d&A+HBUX zBQdp9Op?8Zw&cje2ta6&i$9}cvi(0q=6m5LxS)8@G?jp`nt~xIgkcE9bLq$y>9h4U zi9jHZ=qs0(N&;cu@C3b>*J{Bguww*Lw^krrFNr&8@3zHS?}=0F6!;Ic=Yx;gng^3x4o?3*dm~@)xq%{srwbvs(+Kpx_>dlUH;IHH zc_;KT`wtcZkvk#{N1Fkn1B_8#sZA_fRjI>TtI!02Efl!RM`&l%q44<+hkC=OSe$o> z9>2U+T#_vYD*A_En4rfZ@rWAF&CxIruyAOkE7iH5?jnN2wmCc}|4~ipoU{@fy(cQw zQl@Xg#j(96cJp}(aAC<+5oLtNKM((TC2}}k9)r>-zlS9yR5iVNDm9U{#A$*vzuI=1 zS$5`b#Pd|AOv_c38B}jVcE3FtkoGYx((lpWG?7CY1Ha+1Z2Q{T^RyiazF*Pnfe$Up zcFn6W|8%G#Cw3*d#jK+miHNEn3z5d&-vb|utXWfufUbJ5 zTZB@h+Sa^UaLg<jLvPK|7;aJdmP-Z+$PyGbg{{6@T_TAJWEl;!gx5J)l1gymt^Eto=*vBeIz< z`OuuO0M+jX;(b9uOis9Tq@SkSv^O+oZM2UIC^ePH%d?vBsyqNR;t?gJ?NgV;$0 zPJlvN(s4M_MP(|$7U~Q2$F=!X*wL|G%+bG{*~W@GJljcLfK&782`Iab*jNx;qs=N? z?X&lvPrWy0#8rhCJgS|OIF@Ry9HnkFa57{?^P%&2)_k~Ri&y<|old>^De6S4k9J0V zL=gpO_BZ2(aB@FJsb+)|hN_$TNaUZu^m7Q%Ah?RWq6ELmTiJ*5Q)VB3x`vamH5x|? 
z8d`Ug*Q*y>{Eq(({j|wTq>x^{3`Qp1AG*9eZbMaO-hBO4`ZBr7J#_YC?+`XYpv5a| z&Slq>jv#1tK2N66S>5g+AEkXmGrAHkC*4{%!|S+H6gICfk?+>6bLp4s^@r4V{dN;# zt=J6ylHg~wcvL$kQS&z)is- znrCvpXhUGWW@Y;>gC?1B{3@~1RsE_jyA)7Nozy)>i~2k9$KahJhp#2#%c$OVwP8K^ zuN{q{n2GmhuN<{sqOSN7yQWf$ed~JeVh-GnmDtHto$)8#q(fH;2T)P6foYm4Ghq>e zuPN_S>K;%0+FEzlnJB)5&lRUIN>`)oRnch{!&Z`(6 zw#)QJn3pt*=w0(bAcHqr(fbgIvY0;kd71Wwyq9yvvL9)=<>#9ADgW=ZfW5K}E=VaT z108ci^lBQ$hEFPvJn9M)41cuQWqr`Zo=*pI?ZaAU%Z{T2Z~y7@S;A9o+uFqL+$rk# zQesV{`ogel+jkB&d`*ld245j=iV!#8R#vWc?w;%~|B#8Rso!6cd|3O2{$%00Gy4NJ zl_6JXLWNI!%`0Jf_cyit98uwCKiaxM>M!n#y7S~38J577!LIiaCx(Zdj*r2Xm1f=X zRPfv1T$bqA*YNK_%IBekrk2b^)PM2vB_UM~$JVggtoB$JF1RlF5Mdv*a1)=7vL3A@ z@Uf@WG6XMK%er1uVK0s(Uw6q~tm7H23Q}(8>gSIi#HeoGZdYa0^gZ4pq+8X;XM3yw z&0ijPsqpg`%DJIpxSJfGj>BvQJK{S~Ay0rVZx*(kr*t>0D3A7PzbB|$yvD1H&i2556>8wrTJ|v>@GlpKsG*+66pt0R z=BWAB9Fx>?Z{cIi;#_VtHvE%+++C_%ox6D`abImtWPUxVa!;2~f8SU2-1F?!16u~E z5I3bcv@0}*@)}itFwR|e?Zj*Q-ZVC*X>kjxwgvmrK26=aPlB=6i>UHNXF~*Skreii zH@1m^r%cJF{!`kt#N8~c-4Gcro9+Z zoOgyK7D86&<`YaG8DK~;CdR-0OV@L9l|v|$akLatv4UaTO+7`)W5X3%!9ypppa?!} z9Aqb5B}K4jc5Nm$d7lmQvxrkXv>&?rXV_`w`%`mS#+J zT=x}znQyV#f;zCKT+3p2Z$8!h7~DKv8Q|!GTJEB%E)81w)fIvghr@onvMTubCKLLr z;s+J1#*74Ppp|jXBbbjR`)%%!=+=<3_3gRibYJ}#Np*AEc*J_|EVj>js%cZ0T7}$H zn=$3{IGX&ICqGj8$2F)Ik?Ennyor-McBUfUHme(;@a9t&+)Li1bw`z)AbNi zR)2btzg`=OLYjYPs3?)HIaMg^8hCBP*00|(<3;q{^vmAxS=8yRcXS6!lcuPJM5!`y zTL!*V;Z%v^_vPrC90BiV?uW3S>^+OKHNG9HLwrHGUQ9c;ax!fP zTg`TDALb+^QGa&(Sf!J1I`|44Swpvq4-L%ek=XGKSLRA$A_h z-pU@FYPjAd?`+KmpTJMLf^5x9GELGHE$AGdsPJ*%p9<3q7iyyiY8Ea5-_(Vd1L4~vP0-&forzY6CsvtnYi699Ds7;PJAq$ef|p>BW0Wg_kSESo(g6oFR{2_ z4Cz6~F=s}++f+i_cc|}~rfsBX~?`;=TQy4G! zg6iBfeUzMZ(*OHl{RKy_J3aY}C5s}ypU;A%Im`_NcAN28*stn4&lf?SgtpbjRqN8k zD@dFZ|K!|{ajfhJiFiRy$<}q;c?+4$Cp+A0a-O0nj!#&Vy*PF)@jiAga~+p+#aYnn zIV~D@*Md`Yms4ceoXIbKH2#b@;n{Ih3pv6BINDbOxjjlev^U?f*bSL;H?8PwiO5_B zZo0510D~NkqHhXORgb4Jv^GD39Tc@#?G~xeYGL&1OU4o1(y0H|ePHvuwuX{_5oR|U z<^2iq$o77@UE|tEQH90#Tb5o4-#aGwLzw_p{r~olwRCKxbDdI0-T$1NMd!<P8zcPLQ+z__eQY-ud~fB3}J2CHeUO z5+iceq@NtYqdvMZW} zXPn)$78=_>zRZA6ZDt&gTgCJ-C)tI_q`u~9@Y4(SK`+nm<&PPGd!wXGAE2-($yk$a z<$?MM7kwSh4y}eL4T}G%aJITi8-tYj#??FZjegc=##hDL-KEZ5%hqCst0RW;UxhGF zWVC#oMN0@bYcm%t+10xt0>;au;J+G8b&;lhS&Imw|K+~s9e7t5AW%dV{2-S@NYhyz zX-5TLq1Wc=h4j7HXvnnJ&LmTBVoX)atVNi5VpZ1uuPllqwYGLy%Ws8^^uw7K-@n=c zRXtNdd#%rvzc}%u^wiZQVkOxvc`dCd*=mK;EUNntq=$QJtW^i*V|NsCmy@zbJ22Z; zz3#nn;~Pxndb=$Hm}755q0 z1j==GaTUsSqEOz?zW;a%bd;W@;Jxqqj5afbTw{4vO6S+8+7wF_B?_WS zI#-R;w`83NSrdm0vL?3NNrnthzlxBs?x&4A=Yu2f?OA)BXaJ0QW?naS5yel)FR&_C z4d10oU}LhjNflhX{O6YCItQ5{aZScvvbr4^wL8b--pM+1m#HNV7wJd+UMgzRKj0r% zS%`Ol(*f+pplE}0P;K&*6*>mXU&BF$xK8B`YDsR<7zzJ_Wf2m5)cUxNI5}o=cl%2e zv(*DAp${N5-PmKmvB8%w#edYw(I>S@?ynP$$%SB8GZ%X~!nQ6>b4pT?UAW?iRZ>1y zcfr|s1|m^+zx#i85v&~&>oMjr29;%aT>jNbd|kQrZJ+GANx_lCn0BsY%vNg+e!F?J zdfLDiFp5G&pMLH}nkRIP35-eX?_Be4y)<=FX_RX5Fi{sBYo-mnm&(~Ddyt%JbLoKG79Sk^KaM+(Y7#q73m+40jKlqwsgaa~v z_xUcg8jDK;gnwMQl-vdl@OmJ7$uEK`yr=%rchmXa!JRWPoHxcB>5IJKc8G)!C>fwz zycLYSB*$FV0A&?XNLPVyk2-hWM5P=X9t4Bkg;Wo#>U6v-!_i7Ka~HvRYfbb0kLwCp z#I@FP#n|8pGL}0QAIaLNeXheGlKSn2U6}RU(QQOk_Xme%_!L(z)F(D;IIgO}YXPnI z+1e$fY{p=~pEXpbBLFeoShjyYE{!IYBOEt(q>9ehF^Iak%L*E;O_8xnxkC@dEg}id z0=81y_gW#DdO zl`oLE#Jv&l8CIrFV%?mj=WS&tknynRv?ZEi6hcqkc9FMJCVkC@}8|4kuQ8}QMPWzWOVfgt|{QvR$o>eVZT$ehUlenk}Br#7}XYF-j$M93d4Iew~i4omAeH zv>cIXX~H&`E~Zkah~Y})rpCh@b97y1d6i-IXep+^F%PnWJg?9^iNA346A6$dphKa5 z=yVZIbgxK^*Z|?;^fG4YD9Xw^uj!U_9;@gJR7iR{x3OE*cCjiWc9rKL&erMID$RLH z@#ss8MYnX@brraAhKudK@at|Ox~7twCs3Hk+|ABt->=e?&ac?y^sDZjbT(RxCb;VE zp{R>MH}(Ul5!ov^U5b6OUrl_9LS?U`jfiwqGtyCa6w_3TJ>JCx27({vM1S|aq8UO@ 
zj?<2;Z=F`7aA4jJ9tPC@eQG$oZI(VCGXs2kJJSemTw_mv+kyWf?PohciN1%GsxoTZkTb&^Id60L`BI4;)Hd{Z5jH( z-ZEYmCW?JC4SK{t9)dkw*U==+kIiBz4|;Np2-KyJ$;qn%wBoM@Z~7FtrV_J$LmVH{ z)aa5Jsr@y`kK{YGB;1|q{)(kDVgyriJ2axGrr=&a$*>W80piXY>r?yiGQCoq`ks^B z`z})bdyS---l&2F02jOMi9Ryk+Ynbf&}Fa`)uFnZ*6~x6I+GrXQ$O(hZ_ZC+#`RRR z`Ezncby?CvM(0ZS5&1wJGH!<)eYk4ylLlHpJ~>_e z^qf3^+dBtmyKvQ|Z7`MB>ZBq!CK9pzH%_Z?G>Hnk zQy+PaRa&%vhunE$10PZ~87%JzHz&Mo>#aM$1*>AJc;`RQ(!Y>f1D5NC$3O zq}lm&f8fyWDcEn!*J?=dG52KA#)R$rA|d14H$X{l^IMYm!*(&Sb91POG_G(EL!Y9) zshfP9VZ8u&Wb^4|U1=O_FxY+iQz+f2%P8t>m)Z1c(cQ5Bby|<^50_z&;Sf<QV$yg|#VQdXdE*T;0q_`clC2i>ExgyUm=P1Wococ9m|Cn@S2;&yU?gqKpv0GH-6wSCIx;<%I5 zz~TwiGyk+o(BbPRpMrY~0sU|Sb*J31dGw&rA-n!yS$sN)T^xS7V~Z@!&CEVTwF-FQ zUe%o~oR>!i`i%nX6J&p_k)?-)u(LjiH7ws3W!OVdY>F3mNfw4S zYXx-SpNoF+8-!_J(dXf70EJ9fveAw#G91*;L zGlrTT@^1v#GE+YX%bt?{vG{zaF>HI`@2A zdjf~$@bT`^I2NV2Uj+-wiFlQoUj#VR|CTxG8M%yg9b`V%V zkF4|mXedQnL(QOX`sDo!O1*JsOp_@cU9R88w+j+er!N0#wIuM`!M=$`T!oU&vzrJZ zGKBl&!y_D5K-0p<;{fR*GWnpt%ju=7OC)mgnH0vPn?=ed44&-+fYr^EE@RtAoTb>& z+mVHGXiclg9UlJRB-3g$ps-CkqEGHVaUAmMk8MW{6X8??i&$Sf}Rs)Bn*q^EwbhL2NlZ%KJ3&PtP} zy`8Wr-3TA+*}c;s2b|4ph!i!CW1gPf#J7$zYqs7SDL|bIX84RUi{ar=T+bvgWFL2l zHtZp>i}Fbfv;RgtEK1>MBC?_uY+#lcMUAAoJ4q>5y0QPg7Jwgy4N1_yS(tg4T z-2kMT7fa{D0DC%a=iy6^4))M4eA6S#x@Ox02YG_Zs+n>FW-Nmg{%c_PH5lPk;co`pE(T=47fQIcBX`1WjC3hTB7 zJ=hjSq$h(bJwcdN;5Ue5*1tG327RET&Fdy_8rG2VY$Um&(2_AY=$s&&*>7Q#aQ8MW zQcd$F5Vm{U=iE3R3m^Wl8jt9dMKYXJ_YLF8N6;SL@)~+siKao*ykzJyqXb~Yecn1x zs(fCX&mck^q*bPz=Au*5( z76N(;AhR*zp9X(QH?IDFWPNuamERx#gF+E0aVw)#q>M|2tZqXkp^}}QP%=U`w{J-y znuv@~p@Gao)-9_t(;%D5B?_5E{NCqz9@X#nJAYJe?s?96&)0jL^AK`8$Uu&o7-f#tiKek|2&U) zy@pTJT?iTMxyA&(`9mxRNKHWo=xO&;=**;GmMSf* z#jT{f!zNBivRdxXestChx5OZH;q$hb&gT_!8%h7Koy2bHL8vYvhu}y`KjXxc$tDPm z&}d*G5d{1@Y%gNNUdwp>C(I)Tt!8Flx1MdAWWoz`M zLOFKBgBSO;2si4bM0jS?b<7Fhb$Ss%I9k+l))I^D9XrDC2d@bsdJ z%Y-^8+(nYiHUTXX=oOv~qJ&um?E2M1%1sk;2mM{wY@yc|SkvS%lmKs2DMDMp`b0QD z4~P*~jXX^A5UkCNu#%Qa05mMCd8`GruUUjbdz<+8*r@*5m2T^ajXZ-U{Xe(WW0i1# zSzij$D;NV)tEfS;_4_W7S6S1-!9TQtg%O!7Rt(4pEh8ax`ir(yLd&$o{PXd@X@WP! 
z07}q%N$H%Q!RQ36WFOp}U|uN{!ivpGwoBOjBu$x5*Umrk_34`axGHXasZ<0|XmbRR zpe*Ix$eL+mC>NUYGD3p|t3UGGu_n7%7d8vOB%1cu1}h_2O1U}>!eI@*YL?I(sw!wr z=57hOYUtP0U|}>rnJlyQg$5{moE zv%#4mpC8a-ffb%@Kv4-x(n}6?+NgvB@D%%Z=x;*|<(=?IF4%$G@%%2DrsFt|a!qFEVcpWD+k6+n{^~L4~n(4%Ijp=|bz(?GZ`BFP$ z0G1K0bPvLfSxL0D-^7a@EvG=T6L+gCXsAbQ=U<5VFP%&qi~p)ubB>?|!ytVR zmMO;sKEP;RW%mx0Tp|GwFDicgB5W{Y_?DcmHOwVr9jSdydkOhthUO%yGK-c6F0bsnD!d+_C)44DBL8`NqGZe%g7j^6A*%mMvldob+0=NjM( zeD&L_N6P;7_0yS))>ev&_zG^u4_9X`(^a!p~;Lo;kx zgDvul}{@cM%7qE-C{Cd>DNXLU4fa}Ku=82LfwMJZXxqhkKuEA`M^^z_&& zRF(+%IZG-XLi@UVkw@U5s)7)( z(VA2%13Vy;gi%N=xUMmv&lhm3PY=*HG};E}E9q*nJxtT0eeAK0kck@44HmqE0r3}MUa*uI(If;(?ZLQ0BH6@L95${ zke61toJ2m71Ds%k93HxR5za2`puzi5BM~4`giv`KA;%n8#;xz7av*!}It+>3*{Pog z9WDZ#2&2mvq#i`|J3Iq9-irk6WGF#gN8>dsPm2*N1!a(3F?nUwC{oUlP-}YNA$9oW zv05_rdI|?TQ+Jlc)F8NSV*Ba$8Sm z^_0w)<@nOzkD2{QW|X_urhW}V3F!HWnvO2O#+o&1AYear7#sF<4a`3{c{$o*UUu#c zgZxph;QR+7*WeyB3Z`mJB0OJFFRqhSJl0OJ=mqwnZ1j9lp~|F5WpI52U5DL3ui34o zVWNn!J6HMfHKDEPOHE#fwL~;}U&do%7t&}wOndb9oZJ>ojEdPIf8@M}g=+39njdbv z!fh?#nZ4xTgjMEh9)X3;X<$Fl7(9r!IAPxr9}9xPeUmWfu38&>z-U-vJl0@>;$M#m zg|Sk_T-$eq8<$tNP6Xgz#fsVx(4dPb3*g*iIN=!wU8n%g)_@jbWdw$_EC+V#`cb(V znMrISrKov*J3j{_GkL&YsyccLDP9Ah08$GrX^~c{bfkFdyLyWROmAHtc1nk*&km+WI^Fp|-2y?cnS4$I(k_j<|Yj9mxUQ zFr6)!t9jIta}Mk!D`79Ab48cduV-P<0UN*(m=$g^JoMT9P2Z5|ZZ@ySNlvd-5@4aO zfdm*M-%A9S5zD;nNAXp7Ja@MI<0Y7H88P3~CjUb-P_$+LzOlCnImoIWtnYEYj%el0 z{%68&V#I2i^Kq$FJB#o$FgFf`y9}N-J0hM6JC6$pHVc@NKpQqLe50m%pWhyBT+c&y z(rjWH8T?Vq+EXFNK7u$mcy`lt$`q}8#U>+$hhnR7Cu zo`Z0a^z_3DEZnt67Pz+oH_2(nAkRqRAZecve>;SOD7=KTnt}U&+-X&iz@pr~7Y(pd zP>$JyH5cd4#-|LpfaNidB1rd5S&)Ug{@Z9ogKdZii^9+zLhpR>#M`tssF)sMH-c(0zl9OW z0@NGyb-X;d3JYu1sBs26WX)5-lIh+5Yt@!3QQ*lg6BB;VO*kLFpOvyScnd4TD=rlO z)Kd12HmH}vA9j#)mPn{Qm;&DJ{T>yqzsVckv!1!#Rl8WHsVSOQZl02HDzITHfEW6t zB0k$79xXnl8E=7*AQ3avil2Ft2;=!f=YZy7lr8Rs4(1_uY`R zu(^s8LehS!J_S#ltsbLH*AyUA=||FT9igkk!N7~7vq=YeyF7ZHd5h#kt41+}H3^bW zd_T5;!AJwu{d6l{_c}f&!<>L25Y&2Q%%PYPqYpvmvYp?1Q{yxwv!~uR$rr~OIrjt< zEM4UM=6jBg%51{zfU2lj=aKNv(NUumIXlunI6*3$8o68(W6J@d3TyGMLA^yV3U8I1 z>g50V>GMyODM$yIp3Ks@)jz`ikd`q}VI4fH zI3nQ86}mq`|O*Vpkb(Dnq!oRB97;S@xQUfgki4 z6*cGC`tY1!Vb}1+6SLS@WWCI}oe(2Hml35Hf-PPk3ye%(V|`g0X!%0d3EzaK-o2!S zpMztk9#VRm7$Y_L%tP?j8P<<0SjKLX0@$a9l$~+aj8qF}{siEkfakaE!SL}V!XtKR zKw=%wYZkpaT=NaYC3^QY%>Rvfp26}j-uUwyOd&X{IwQ8olCIjsiTQbeVTM63zD-~? 
zLmaCSv0~RYfNFHlpZ5;Rir?zjzSYlScI&aOoZ@Nkj?=!s@$X^3297L%Gq`(bqJY(| zvNArv{Xzkoa9>7mhmr-JcZa;Q&)6}**H_7jEB>srbtm|=&R8*7n{W+Q_npr~lCRgl z8r^wARDzT=%Z-mg(y%bb74W<;CQcv~925nE+C&=EQ-~-EUpMvUAtMjAZ({A^9rqu; z+hP_WUcVuM;|O8?toA3cm7|>}3W!0&sO!q1b3FeaQ)G`K&D&3E#3o>tHL7knwrSL4 z;5-rTVIAD%Tde6jBYT5+L>FI^r5CZr@|`u1WJiNorFj<|vV8)d(DxAH;*3DJW9{LZ z(+4-bjja$0UC^9#v~lGY?c}&`(lm+Px zB)6W5CupF!Wtqxj9dP+4oNV$AJ1>3C^YXw;?}IDhe!QPlZ<%j|)nQNjedmoGE8{8z zQP@oF;Ud9jq;7Q})6*p(03lO2_@wjI>HrxVLyu{ zf5`V9ThEv*AMf63CedTtlGj;zXFoA(WwPXtPexFy;5LSWf+U=DbLAhp{CJ*a-)?V3 zu&_&o03=w+xCQRehk4#cS4XoQhFfyXl0l8-KFDiN2eDcv_4v3@sy$Q=5EhJ%C$H8i z#HsGEScI0#_!~s$o`EYN zp6j$x*WvHOWelr#+JkYo6>~yf2fumXk0L+`QWH$q%oR5-kla8b0Acth^WIa-<;2=u z_}p6ge=4syS#c+1!9~g0k_fmHB6Vs+ujbZKv^Y2s2a!Pd7aGjLc-fw0mtKy4oGG~Y z2=MCzS*yYViC~)U3D#D)`pSB^8l%xF`r)Agtxe4#uAJQ5v>9R1r-AkT=NdXeUbbH+ z+`_PMJd>r8VWm73x3H@-i76@Ei2p~XC~^=}7we%fgg!*dT%oPL8J<4xOFEi4F4voP zv}lD}Tu6R7>+c2uW^!q(_so><^rW8{ab%%uAK37ODODCq@Dt8q6)pzXvGVC8lzo6P z2zQ{6xMuiWydgYnn3n)=%{N!`VKip)d3k4>0NtQti^mfS8~@Xm`|`O!N%nWUIVmJw z|2^cSUjtu(dHvTo3i1Gh1T?#Vmv)3AXY!$f4^rf@y_-HCdB7q3?9AqCeL z<6jzHrF|`->Wugz%k>7pb@RS8Cy0w}=qsDZx0U~ez?<730d z>NHgHw66umtBUe(sej}ny{b9>X+Fqxkr{6iD?WG8+oKB@myo+JdI2ZR-ljb&COX}k z)8%{5gMHXLpq}~_b^Oppe@5VTuK8x#qo^#DSMd230d^crq(ylf#7zqo)~#Sx9IHyo ze%;wxnwL7C*6oS|GDmzlxQt<>bTnAR}!QHe*zCt9xNwXFN-mdR2uvAs{AdTK3oFF#5WsYbUioGdv?BGHbJTqWCW&b>sQarscM4 zG3wEN>~`we#&dTjZ_KUuDqT@Vspxz@^0|OIdT- zA-&k&d0bSY?k;q1;P#3hcn^EYLjqL1)u4-ZP90p?E8~ONg zH^`0_xrfc*2xG-cs{ zg}Q4qUxQ*l?Lo@Rg)9xyA!GP|oxOht%}8mTg+{C{3jz+nISjc{u;+Wf+5c!dQXKrs z?V)*$t+LR;J?_^U39=;e!tGr;@jGs?QDUJtj=tnDq*GyJvh{ab!op&6J5@L^%LWmf zIYoq;hRM*XIVSCqpa`TgXM2<4EE;*U4888mJUQwtDK@b-Krlk0-q!p6@?-*X(0Bn7 zP{0_`ul?|2!7?@by|ZkK>Yt4XtwWDu7~ERUYyj@scEY2F#Ypvk>QG}_b8aJ;9iB$gP;pXw3t1t#v(r)$o zmY+6r4FP=Ca^5mcTFE|KawDroRBj&lQ6gQVa*ePX4RD|p?8b&gmPy~i0j7)dELhl= zJ;wd%zE}Ntps~Jg1@0L#SM&?Tphhc7T7Cb_PGtRLiGmqDUCTnmIb_DCrvWa9Io&AI z146LI-b6&aKG`Q#(==1iH~h&1d)_d$-zWHc2{*GFPzpt^9_ZCN@t?oIx0dI4f?_VA zxNy7EYSX5LVZJBpAFjOEBNQaR<*Lqtv+RwR8OnS{D*rRsXQ{;cmT&8d=P8SWT=jN` z$*g!@eCoDkY^1d5jx|9E%!j{x{`Rv82Qp*LWLNbszaBWEFFx^E`w-68cf*1IDJ4hHnNVw}xD|N=Aq?p%Y zBG47uPGlw zxFLv?#pmVnG0OF`!aO^m9?^H{bXG@eJFmh*-@I2kS>`{|9c4{g7BF3RMP_l!3VhLy z{TSI>@t*f7zD-Q^h~9^;-z(_OL+xysP~-ZKGg9T=`RauSs~;^TkWOg1ua!C&21}~? 
z#|RK2FEU3gWKwi)M70J=Z1ivyr{2$YrOo``+Bc|F_`FK7ADSK66uF3Z!_8rDXRYC< zI)P`>)}XFW;G2d=eB8w^#n8c8F0KN70ilyC0>U(Wtn^cGV>0YFdOU}>b`NB0&L6r2 zU-wLwAn{D0_@axi%dW7O=euXWts3dNNng*%LTrE}OkfT#}>a;k*IrNUg0|;wye<^~d(k^2% zfH0{mn?YT4#F#2ju9tc7!(bUUX9QtJ-vgtR$+bH9k<l1p%z6UxoHL-@xN6*&RkWRe^lnZ zx}5x-GosLF8NN*Im(D9LhSHsiMNJ-gR#!B9G}<^#q=xSeD{t3U2=dN}yRG^|G@{gV zEdn<5j?K%^#QqbK(wiqW;OX^5R+HZ3zM>bG4)P(E@Qi!M^>U%J&p<4(-um2;skx#x zQ2(4wE1y#^*wr4>S)^jHZ8W{EqQ=B*u?}w z;22LuzYDeN0eCEdp$o7ay^9_vhZx zw$Yb2&aa!2s$5@pBV`W@T{LYEgLX_}u^Y=&N9>6X#iol{N1Y4ea?=FFdX~O$b6ypx zl*R9CddRaS%}h0rtg$WyJNQ*~RDBf`=-PttioBO3Nu_7Kdn;32rocs!BkXXT-F+*l z0n<59B`B=?>CvII0`suyr$)~2?+B*%)LUN)scM$23bh_-JN(1{u=z0wwV8L~-EJz2 zGlHJ%L(*%xGz1wrT0%{+A*gIFkuJvKu13BGsbh8z8~uHpCMZ(QuY&bhfCE%&Ez6%) z_`C`3HIbKTvg%2>(E7jPjC+>O`7uLlg}(Xqxih_D{5DsI4p0L2O7vz8*PuoBmFA{} zDAzxJ(TxkU3rvG0k{wWe*GH*=sF;Tjde(zc#~4Qi-#iuyBO@J1S$_TAayhKw`nNf~ zj*h|)Vb!PIc%DdqY>n4DU7s2WbkoS6JT=?TR_a&EEvs1~`@HqHe~H~Nm7rePvQQ&+ zXy{5Def%~Xb?jpHWO`S5oph>3oL$0;Ze&7;L3yTJ-*e*UX!(gLtaQbY+)Y-t|I#?= zySF=u`%E)D`X^jW^Yfk@2#}KNkpATRGBot(i-dl=FK0^SwMAqe`G&l;o7ocLYS9U` z6By@@tVhu1*O?Bm2Mwf4bFbBi_Pl)MIr&74l9@tt=Igz(6$=>H3t#hnVwx|gRw(3j zKbn^Z@AH7_;Qs29`pl0}E>q|TL7azRUF%|mCr3#XQ}JkL z{(CoK6klenpXWMbDfGD@)$&q@s~eY+tkVOV$MA+7Q+bFD^rhvd$&{V>(~64yI(SZm zQJrM_2r(x^R#9P#{q{gYw86?iUZ z<^?KY0oUA-bTaMW*~8=}-+~Lj_(d0XPmBgdk9cj=+a>#ipM}jGQYQ2d1iYstf}UH! zp;C-y#}76Jn;;d--{p4xL!4RgAzkF*sFYKeNN56oA4?I0DBKN@_{Y+3zwS6tauw_$z*G|_k76Eaev)a4G4kwpDc3}=$?`w`*q zph5syqoSJw6Zs-{{QYtiI9HL#dI%`#23se2Uo?2ZDOdl(uh(jf&8pcA9(JwZ%OBB@qFbTVDCG4hylnYPTjYKSJiX(hKOXYXZ^-FZkJ zQ&H&}0-7R3P@(esClYpmtQM1X!3^-Yyw!p*IWaPM!MCrE6}u0zDTEhYyCjLnaa*+6 z_3A)y>}v=rjw$gIgy4Se-NU|b{_7@3nIE^-hzZT|oe<-rm!Sw_A$y&h5SD0mL%BX2 z-!k>spLI{W4a@=~x~`Ht@~vGG&iq5t#3cx4*sesq92q@G*%(_$J@JZyO*N;YEn&e# z?V7$5H+TPVq~NP+Y>@GVvK=oDu4f%S87amXg|28jva7%aR`KGD^dg`|K9?SN8%!xF z@G{y6onA=1ko&h%e zehT=*^R;vj{4fH&|qGC+zeV|>&fvhH?Jy5hY7gU58|*$&|1X~wqm{S>fG-(KAc}D8 z64I$2Mt?0C!=R0(cCf9jdx&bcjHNFvnRD0=-fX?_@1_iJM@8HG{RQ4(S44JRbA$SX zX3m)Yu_>QE++>tv69XoJdFr-dh~en&)^GD(SY7WCgygY>bO}7=|*BOGT(bEEOEO9g{i(Rt4F%F^Zo&~ zl&D?sTXpy0yR&52*p+O%LUCW=9}mtLe&?;grFWsMw5&+?CX;3C77`2M!s zs>)=+Ccw%70T`E(kQGS>Uc?VV?hZ~*Oh*CL!0Lp0mD$AE1zs$hY$PZ}uk(6aV+cX^ z7x5bTnoTugBAW#;N?wZnxM~exI`&g*S@(WDF$t&W=>`W;po3ABHKKW~VY9gd5JAB| zFms|w5mIsTH4fG}q=)XyZnrMZSRDfti*gJZ7kHI&%=uR2`&~z|vK1v0x@A9PYBZv4 z;Cy@hsBh%~2i3pI7bk`g{qVa{2_T6fnVUZhZrltsR%s@Jgqn$l;#N8_KOaHJ)&?O> z@wf6b#mV{DuhfFPb&<^CTkMPnN)dK7^}hl7V!34R;7g$FqtQVtZMK@_64c%1|4}2ojQ0lsm^j2Y@ZicnJr@+(VgTB+} zZO!j9#jPj}v|mAEp@r6jV-lB(SNFgVDJf>{#yU1iUI8a1D;c;rAr>&?fO@(x`UuG2 zVWx4B?ih+RN{?QG5Bo@r9A+%_#&m;%6(A1ut8&|&dXG<@*M8pNk*${|81d=iCNVa9 z-6@~dLru-$z6x0LnU30MNe>P(!$0Qxdg*V#kF-11Ohl24 zpno=nOv^Wpm@~zN$MqiNo!t9egt0CtpKrjl(a~0&*#a}fv21>LuOW( zy0?4alhjB4-U@H%kqP)Den$oGNzZk}`wi$Lmxp#Vc~3|R#Y3BH7e`*Vp93mRBx;ei z#s_j}Ae}MIxlo}-!z=!D0iH72;G1V{ua>#HL?lkFmId$tk^w-u*}VuR_9AEkIg4H@lDyOH(35c%JCtYk41(BC3)gV zhB(~c6gL*V?Hv%pRHH`Cn~!m^VhxWGnsYGT-URXjttnn}5K87fKyT$n}7^OLHc-%hH@%4|V zskOR12y=pMZOo1VI@&4u$&!Rh z+}*Kt;8lGH0k8~;L zmNbd$EQxHrV)R-MNcdwD)`Wx|Cel}q^{BrkMQIu1}6TyfMm}-mb7E@=@Usom< zi+#U7#5MU{a9AVMH?R~mc*B{>^+ir8DE&I^CbbsxIkA>?s?OtwzxQ^?vMU0v*%&DR zZTsM;av#4y?HcgX^dbvTAotsZ2zt>>445AAwZvlZ2M)ep|1(RfCxE3hczUj(SgfQ^ z{3HMHHykc&pWglE z{Gl?DrBjBp-<$IIr~)M*2uR2eKM51VN{+ zViJ z+X>i=1;|RLLg+`_K!Fp>yrxpsMHitlh~R>VXWj+y!FX8%i5*ll=u;+T$?N_<2nLdw zXYWgKi@0xwi135*}p`ZHCa_>Rfhw9>nf6;&bi~kjD?}GT3bc68ZuEUG@7_um`o%f=Cw<7iV z@T3phSyYEj)Fn2-*n&XkrUKN`G{5B6#}*7{LORz9^~li%#69AL@w5Rx?5hf?d5CAkpeJnFgGFTHtR(;$4L6t$y(n)QhH}yTpvW!s8Fmk);;5oM4Y~eb 
z4kpZx4=H^QY)uSK!SPAi6;^eysj(S91Yl4v?D|*U2!_K2JG&Q*F+8I6(f9FuJ*l*3H-kv$?p9j_h=E(A2}skEfMX0Yo(D;enny zk&D25A=U*yHj3Ao=4T6QFf%bYFxJ(NIu4pcQLhrTx?E1B20{ST_aBiT8de4qaiF9< zbZOJQ1R>`*)gB9P!4vSNywuOF{>gJfRY$xc;7UrupPp>58AtS1yn|M86g6QF9|^|L2M94Gwy^-fMvk?-Mb6%5CUA;I?m9kUdn zf+CyOty-z(gGry9SkewoT45mRQ{dpY;uL33^@xnQg#A##bro>+C|!V=5!Gpo=abmS zB9@80_+ zM-KSgt9Kfu-M31_sEEzwdcgHz-7?%*RCOuY$F8B{6{(mHV`>u=0}a5i<sb1{^{qk z53}ZeLUn<3vJ9^aNL8n(a|!0&d0NTY_!3K(FAC8i;@6Lku&P_O&_`CzL4HQ)j9Haq z*`1Ju^QX43j5X4!)tP?R@%`hgXM>>2Bnv#nzxJBr81hghtPPO8t9@~-;(8tg>&q*= zr?aKsO>aJ4yx48!iA@4n3+YG9>ouZRqX3H-)HQhy#*(xDJL@IQf(=$3#2+?ua5NzA zwSgBw1UU$pTbD3!9aDLj0W0n7d zrr2R2(&4VyrYKg#<@WbBD~uh^zT593u>~n1GrRmcTg|BuT?5dsgqrnR>c~K6r&UMr z?fuDa;bnfDqk*4nR&eoZurl;W@i2^xd@m2K0(S?9AhZOt`zWcFJB{b_2Z*Fe*IF?1 z%NK5g^S^BuMsEgD=vnr!M|+xzOJCg4Oj7Kz8Ta%W>>ZKg=fEOJJENRfAlV)GC*ck6 zbXy@t|A2Zb8c5VLQ-{DkQvwV2%upTLBUU`Fpnh>A>u3rGV~AW{IMiY}s}uf08@Ngw z<0p9xJjgmaqflDBVPtsDBT!4K>cRk@& zVEPBrYDPh}ZQ0O!7U-DVz-kE=NXr907DMYE2{hGUrG2gAAB55OK(;pJ9Aj1r$;Y$5eC0#9r0BYs zbm|idT|ISlQ2t{Zhqu_0fKR9W9$#RFsTr~CCF5rIV>t5I_ZOhKAA*f%oLHBJ$_Kgl z(K@)y6|%VA=prYqh}E`)<_xfbYkBpk`i0y3FYF7czqR9qn;qW?pn>ZYk;O}#w!UWW zx11_e67`q$!mh230-S>uZTou>m^E1uXh1p`k^jI!LBB`v8NeS0xM`vgzihvI>VeU- zl9t={S)&8h!{dIx%V)yD6D%>kU%P6@)89}26I>o&KG@?X`T71)0n08D5$Pt`db}yR z?IBL)0f73-iA!08YQ$W5B{5nn;0qXC*KjZGGBwIeH-89u?MUxvMYfqSr|a699YC(P z1v=Fy3nJzY={?J9{@KuTuE^%Ld5z!GEmdreA|QNK-?CmN(U4sci&OivYKJDY|_H4l4&$`hiQHqD{&l~P~I!Rhh8LyA`y2aQWe7u?XHz7_U3p?bSnKyzl)`_VIoBkNZUw)G({r5G(v+*PhL zmo9{!FGDzH(%ZCFj}EkDGi09=q;e=#!;1)I<$f+|=Sg5wwwQn3fpXK0KDETNXGg zRLQ(qd#HT7XY6Z?g~CEmo_fgJxcy?Syxu>Sp>Q!DP2wpC@Hu= zjv5yk_%E`Nr{}G%?zYx!tAD8EoSrroSmB{PXYj!D))Jd5Zp%6W(8cMIBX&3`om(#M@ZyM12r!P1}pJQ&GVh`*l(=AucFuiR=#Uu5aeS zBU+p43isDr{`l?NVe}wx@h7Pn<}}rbefa5)i`xq>yS?RGKq0%7s4o3g`KV!^9El4J z9S+kHSL5V6Ns(lzF*9Sb%+;wQrC>3GL5lS? 
zba4$iXl36Ut?4w+JLfTMJZ7*rXA6#hGVebDuM%Un*`M{7y<$RoHq%Z}Le0J&E307A|58X#HeNXwp7U%EA7j(s*6{##>ivR*xvQOT|77Zb*ZZ?`DlGc<_39I zqu^-iCaBj3Fi<>7D3w@XHc^YmN{;{D+z|T3d+v7B`(57V)7HIeGH*lO#wWLWvQRil zw#$1Pr^{>j%r75%H}G6+c>}?Qza;u9ms7%p9ZHz;>)6XpOMq>K4i?Zd44bN{9ru3} zrQdt^v)DDEdaMLAd;Wwub=N(%42F|yuEkRihi?yAn+=qB#vJ;D0?$cfs!BD zEf;Iy^do&0zZmvQ>{fX3z#G$FLKlA&&33NaanW^Ugg(Vk&F9n*K(YCHBPCD2*GlUT z5yIV82r4XP8{4fU>Ri@~4r)9hTCo`aeE^*c|NK)mQPIaB(Q|pFZucthZsqMl4-Y!8 zFrU3MZYO;dsQG8PQooz?YW~4|F8Nw~v>`{>`k~lYPUxS=7b?ZH zbOBf*0TymdJ(C3Nden_@-|(@x&WXZS3l199vb zQ)2mtXG+j_(xCOPvIPGqRE9I^3#dkFk`2C#KSdR#)xB-G9kyR7WztHjta`HF$Wv0F z|L4rYzLkW0xrmi)YV^JO*SpEO+=zN_z}>LJb?3EAJRNkJ^sP(5%AE)(WW#u%wWqJq zN!`@8U9itASZ>Lr!Rmd-$0qAX+?@F;wX2-J?2ehn#x4+sPiZaOwxRm3DIR76C2zuw zV=Z!{(2ZaP$J#7)4IRvs=sHQuvSfuzuUM2l8EzXNcE4cbFIp)N$6`lJf8O2Cn@C2N zjLWa8zbZIJ-u~Obft7q)E1SBDyWtd06A}wmH6Q5tcIbeR?K*mNuJk709F?{J?|2yxfJ8rG8ZzWa}`4KLyKHeO*(-*=8`(D-ycl0xO%eQrcXK4B0dd|FP;`g%eu`Bqmh(1VO{cFzb(8>05Se=vI!{`e zSNrn-(HBM+PbDyge?F$}>roq*A2@OPT~oghR51ulh5a+>jP2=jB9w5Kk`B9iE@(D% z!7KjIfw9+Qxbv4ZbgFLbV=>8#g*Mz?GmgR%E}Lr~3NJ*p8CDg@YrEY)J=9phN~izogWu{}{heQv<#cPUU3KQq^z&w&7qYmUiudh1 z=byBFIrFrjhEHj-8#`Tq&|+*h_^%|;Z#;c^Xd(A?5Nx{7K>!T5@dTbyBUI?=LyKsl zNu7*Bugc*~vYQwB2uan&Q3YIOJTj-Xl{{0zS!urHI_LhcDftst=FoV#51AXZ*0{kT zQ4eESF@Q>&&$b?-3X0(PW_u<5bT^h`xl#gs+9n-#QVph;<72*OeL8lsaKH0)4?Xoo zF4^;QNBq0ooVx0-hqSt%)tTOJe>kn^J6sqZGG-p^ z_Oe)wMph}YIdsJwIT)`l)nER(Zy|Zf_&)qr)O&G9xNa5mtJva0I|U?>cT|5` zWV+C2t9ODh)1XzuN3^kE0j-Q&US|?ql}C=Q^=YHoKKyLlEht0sgt!=v9s^DRyB-VT zT7l(Sfd+WE?6^ydsPupf!Zexu*b*`v^NadtqlZK5FQ!*uWsxD&42Wi2wh~J5E*bK& zUA`<0Z?-Y@L#Oo;_nlJS*cdw{#9<##7^kk_f7B-IgYqqBma;tPf@8c9FZ?-(Zl4F^ zv{^QFIuo^7-YL32T$~8d{$jh|SrG+i)3#(*wP*RA;cky>#ftO#+gKCV5|Tz2{3lGL zDvx+)KA~7{#Z6uXVe;NVz>H0)uVGDG0#8c&a;h$lRZZEQk7*g3!0Z=dOJtd!=^r;j z-4q27FNCFX?Vb8b<3g_gLH{sO48~4gn`ec@Y4F(_G$=PBCsrir9}Bs|Gn6O#R>*On z;G<>=MKDp0oVfTOluro?E=rTCWBY`Ug&PF-?Q)D2>I7<6!04NH{7 zw>?mMWamczYrQ@4r<_l}6N_`h-jVqLm!<#MiSy%Eqx9FZQxbA1F9AAW;Y0$9-tyUY zoT6ZO9i!h^p76Hy5{vzF4N8VdOVe~N&xW!7l4*rFj?>W>TqQl|KBPkPOz2>-VVNAsQn`gRdg^i*oEbt~)pAc$OreW~ z+gc}|fFWUKpSD(E;Z3E#Q&uDA@~?ZNmlX1|gqfNfOy0IrX2HVA<4DiaAx|LjNm;Fo zts$C_OOxjJdJ{z-)#x)h=OLp__{Q74Dqt4?c#_D@e%sCc)4Kj z#}F2sU3`_!Km(;_BG>vZ?d8lx4($F|0dGU@(~xKNpG(#Z4e3bj#ghUfx4R6b2fw=Ir=0esJU6B|>wI#(|z%{8=dkR72gyT9D}`#j~zn@@6#*c)V#fbl3~y+rN% zmXs2VIdIwTh+7B;%PR&2IicgfJY_|V1f zEa7C#WPi}yAobv1(@blU!R_dsJLt%LcsAG-7)vtq)v>UqTXy4gaj^3_g9Ups91@N! 
zQYsVR>gWg>@!j;!G+0jU)(-WWZm@2!;UOJKL%KK-GGVKd{~GExc3zOTv}NQLZT4%MRMj%|Mp+80%yP6 zVNv=YVRsW<0TdR4B9p@eI#loEA~oS4`t<0}zSxdOtD(E3KK)p#&V;&OOm=L2T{d%B zaY}lUnve5~sZSSPkF`X<9oiMCr^nSrCC!h)XIuT_A%ByIE$%jlMU+l=uUabDiSp-+ zodo_)Tzd*G8mafBt5vWTYnDw<-Hdvl{y?|V!qz8NPLL6_-CvY>maFjD)?|!_%*WW* z8fy3mz-4OWu!r}Q&@z#`*hz}frK1O2A^%CalKWE<^^j#>lk;E+#`sU%VeKyTSo{BV zj=%{31>6tVAChW_-CE4slzn5;RWYGGJfG9)eIrD^@$-`+LHoynG~5iXvG%21k8ee{ zum2&%B7mW{pBQ46QmPpoTTj0qo|_YIyXRPba9BCaC@F7MtEfQll!D{qSarkQXYEq> z7y;z`ar?Ql%LDiO{Y}TyP0YY1Z)ES_al?57`?o9X&DesJs2x z{+Nt|FQ%6Dc2E-3itX1H-jZC9Nv1?;c0aP8@y6Pw{3C=ighidv2LC}nPZWG<=Mh#; zU}8HO-20nu$sa1|E^|`fTsg*(yzS5q-kCcKDZ&I4pxytG?NbvUI`;T)LBb{X7AL(_ ztNxUa!E$uzAw=eoalM)AH3wmtWoG<~X7DGkozz21jZ2-p6r-;9s&rgQ-!EJH^FI6B zT*j^w%oH9*HR)EA1qDwmS~vUBjl3)qrlOzsKfpD}GIImTwJ)hNZy^D|usq$f>ZCQc z%KZ&c`gKlK5#vF~SfkyfPZv{$@qko8pLMdS`pxKGQF1UkM6!G5?=0jOYP=c>gMTj*5@W_{QLr$^}+Mte;lgEc)Mz%^Tk@UX`BMuvn&r#pJn$%2ly-Re~35 zl7!pbY92m5WBhcy?A(lZ{H5iZURS#eHD|9%lSQ&#sCznT*0=Bs9#+5(}Q-##tarO5me%S$P{ zGU?HXk9o%XW<|G04DE2zW*gN)L;mfh)dzrLSwxbuhPv;$+ zqD?IxH~vOh8@|7O6n~ke`JVTGKum_7J-%(m+KOqfQfRzd_2pR_+<8NyRTW;l1%m^j6D4)F-`T=TI5bMw9b&gy zT#Z~5n5Rz}rKVVwpZ;u?m0J9>Av40K<6K&km1~s4xNFC&+tRDLTnYAKaZ8=Q237p6 zm(jyxZt44PW?xe^s{zc?l_&1{3qE8p?w@rE(iB_I_v$g$es;{X75xx5P|AOaFYyR391kHNlyI#{9kJ zte9#LL35!-B*OXJW3S5t!tRIEHtGGYRf_6<%p%%ogS|fUdE+XnK4EpNSW#xmx&ppXX^F(W|Xs94=I7ro5Nk(B|K5v`sp^bH8)rH6=lLRwXHNUY$*; zze<|h*F1yT3^^K(WKGrW;-)z{bR_SB#?8+AgsuiZg?$Lcr95SJ8+Wo$0kr-K9jnW6)6Ia8iHKE#4jYjQlUXsU3M3FKKGyOGfZ1{AGSIV0_>w~?Pz zxYw4yI&nMYAw`6-GwH7AVi#7-uamS$&BuV9O5E-p>)-kq-u)J}bc|^l4?-kYX zupHqfx~Csv7LBB+z`LM7X+O}UK9ZRa}Bw7Y1Jqu#>}6-(b9$LT97c9h*E*RQ#l`Yr>D(Eo3RVj$ZbL zT=gxbwHv}Ty}Tv=6jBqdL|!N@XLC7@6aB`n6KFR=yQMnj*05tr4U6u1LkgrN0F~Mz(n)AxBF2 z*^h=o&DN>GyQi!_Jp@XjsapclI-9QLRk)r!UDAIjEH%=f$zq~KPMrs)^g_k@FgRjM z4*I%6sZ^8aDv6S~BYpQ*O7?Y$h+s}#vv87#L*R`xH(6f%;^cN1q%Jcp9#UN=d*f!_9VwO#(h z1|a*I^T#&pT*>dI=oAho>}RqS#C+6s6vqNdF@qPg{Zf@u7bv`Ybe;fMFjYF$N18p+ zp|PYNy~faDB>~fHHm3RN;#m3YnCZHv$)fi7LrfX-+bzpu`O zq{v^;z9y%7tKFi+hbLf>Q?E&Fcmu}mYAuM|Djt^Ln**~s{wv=UbFP{;TlMc`+A$3I zMW0Mo%`c87t_Z$5l*K+fGr=SnsVr7Pdhx2QaCBFxjy29T#-TW-Z)Z-a){w>@#$Y0c z!D3VFy1zL8Rj7%w+Mg4eN}_ek2$)?FpAjgLVby3NHc0FTN(T@9FpEIfcKFX34wGV(H76hc8y3gST_r2rsb@S=qtb|HIdp$3wmS|G&o?#UzCgtwf5- zGBTJpCB#j#WlLq>WnZQRMW`gQyJXE)Bv~W0VM-w}wiqHyV~zN|-eYt>-~0Laoj>j) z*KywGyq4!{Ij?h$hCce7(H1lRI^nu{k7iCG?N*~-^N93a`jAa9B%t5AzJKl?s5;mY zsiU}wk#wQp7Hk$m_#}3bw(n4Yngnln$<&c4LVegMBmM$^kInpw6bUy@5)=)^kwRe;0iDgA6@`4v#a}$dp6~%X z1|dvPTnOOvQ@RVD_9QFHtN^k>Wl#rQb$8u?9QVBj*M{zri$lJe^` z>o%6|o8%Cp@7J)v0-=GgJN#~Z4O8*97-o3A!W_jt*cBm8agf^#X47i8eJ6FO}V00lPjT$lQf{6YMbm`nu2AbRQJKWnFP zNDea?4zkLx5?8sXTst8!sOajm?|1oL&aJ}q;W(2Xf1j6XtbW&3?Qw_ZOrtY(eS1m& zGuRp5jnn>M;UPss(q>#vOE48YQ`I{U)gFG9%@ZQZh--(NwBU{XYb9IF7CP>Pf1WZQ z&e@CoXZ#G*?NP6O4u=it&9z!b|1;$R;922ap&}*} zyXvV7fUdG=I)0r1XL!y{`nKGBFYfLiG8Mix;?v%bjHF(RYwhVrub=Vywlj_~zAml$ zZ*(e2+Xl=Q;J}^7Cs;AyO9I`c@^i1waB`NQ)B7V*-Yo z<`Wi78aOboPtuW@1wO*VGsez+W+n5P#3l_legx)Mo)4;3hw}%TyYHi7M&L$G$kLV1 zDIlPZYvg*`zQ5ommeUPLQu3LXIjVe7|0sLPp=KbI=NYH_=sSCgjt~@r0HFibNYr>h zxZ(p=j>K)w<%moeP~oYDkr29EaGXO;+@#;7uO+@2lj3pV1ie1 zmQ$wuyILf~AQc0zZO9HF42sU%88}UO!g7i)w5^qA*hmMC!Xq(6ZsCjy+Dd2x3aZ;C z6|ZN6D@v;4;G$ndk&as$s2{Tt>^aBORtAH?tjFJ#jBPlbA4@kpWgb+d%Z9;rayNS) zzHCA7`OqInv&1TiHJ{bY$%hsQQ&@R*R z&9{BGsR@f+U9(iiZ*+4(c$a$N!1W`CXoR3!e62`u1(ns~G2YywFO@|h&_ zcYx5KlegzvzpWP-)DYCgygp24D^IW-wg$IWQaNy9D-C1Nq1psECXs20O&Sm3rh!4J z<7;Tjn<~wE#BKvrlfk`x9!17-B(M}E12;O_3K6#zi7P;@=>9eT;R=ph^ABP!ZkW!? 
zUWT&2{(*DpPvBfmlEAry#6G`7tCF>Z!L%oSuU7{_a=$RsA0uwCkF2#0)iAJX$I$tq zt@}7g-)o5x3ya#^BzCCr7CVMQ13??4kJu+n==)}UDy@fi-r2}9iI>CR85XhJLg&gM zh|ew+l)iNPCrj(W11sjFNZe-K4;-Pp@}VF|ha~KGVhG`{O2pLmBOFoLS08w{j?^4~ z{s7-h7<+m+ZHgSs^T#jXyd}4}COR-mxVr7+GQz-&{7wR27*8=DO1}qzsO44$boTtH z>*94Wa|lafDB-t^4}PHgd~Kf*F0Ap`gbBYk!$o>FkH8A)JaIRbc+%q~p!#h?b9~m3t3wKqg5T7YmmXK%YIDEKJ%0( zG(XHNp-2378{R=A_(VA`hG0xrBiHj_sHIqX`UCz1Y~3!5<*Ien+z7r?za4HvcD;J! z20Cx)P7ned86$lvch1H9!_OObV+U?0Lbl49F#jA}#n(lndZK5m5e53tlgIQwKh5{x zt^4QP+n^XQkALSTBY8NuM%u>Cg_4-0H?-JM1kY&XLk7?ga`<=8yTo*s4jA^z^rXYo zqCImx$yg5O_^1C7$V{G;SlpyimigPTr|yqp{eB__qGlPFZ(>YKIT6FOZo(@M2r&&! zjMvK(>+~!P^1NH|1LpdlEed*U`DS`O?m@=JD-F{vN!N_QgWG};T9LrCRP%=m5CylN4V)MWI2CI z5RfbAzOPfx9x?&01IN^5vz05Vfpa`Bz{-lS5T|ds zY=+6EkN@F~ z={-J1%CW>$OlmFz_Bgy~*=iOS@PA;380fvG?>Z=Q=lW?%b3VLxjF67dy~qtNU=%Q| z1;!8fh&=B;YYl^g9#;GHWb*~Nncr=GY+qkLRIYD5vM;?h_k(F{1@%v^4(S3Wh~GZ! z;Yoj~1y;sy&0V*%@d_6+A^laag!=`k{a4{Sg08v!0$`kA6B4%1;%Lq-kJxl@b}zJ^ z%;T#60D3*HD?jLNvCEh!Nl(F7ML>|gHYBn7IT~IpXi0pUkEMnANcYwYE{~kRO8kus zJ~+Gk@&+`TBl0XKvxw(T&h+I1Mu*r{PuPN3^i%qWFX*Q447zngVBS5vKJOii&;1A~ zLA6S^2NMpJ8zifu=sMZ`rdMNHN~EX1etTDg8$jq&z2_6#s&-#17)PWR6^ zAne}MVA&2;h7hcfxU$dhA%_JHkY#4PBpRDkuZ; zThfCbOKuJj$dv3!yfT6J@qt`J?s672mmQJEr>`LCta^hs}Vn=qXTeqLH@B_*EB06t+8#VMxkWOLZ28ivd z)yQHPGg#F3!nP%8_{niiieemOjo+T*9LLd(z-y7ycS1E(s^5jz<_>{_Rj6U><<8Y; z!)jJ${lv;v*63UO?&Y$2BX0k6f7p*ZvJt_nKyo*!hc3I92H@>5=G>1@=aKk(({?5N z8S%C!j)qRQo3-C*nt0~Wdb3Ai1I4rKP2n#MYh!HH4!U}7Ep4;iDZ2$Ssli502SGir z>sjuXZHoOd(c!_xBlkBmx>TZYaAi6cznilv+!FLtnjSM2HayYdWg-rMh0YK6zMJV0 zE7_1(Y5BT-?vX_UZ_~kC?@PlhvnweRT=XZOi(0$SF(QPgwSSk5T}ZB#8a?>fUmVa3 z+H5X3(4r6h(pi(POT(cUe(Pl?u6<|2XT4JlM3&OE^>8&Jk@KCW(jpbz=5pGT+i-rx3&g$c`ej|iTPlZp-gyl_^UG9ZB<$6}?FQvWV1-rRHx(wW=r zmp}UUD4*~V7$AAkv8?lpuf1Y{YaK~bf5dXOR>0lvg zBDm$vll~1o+kA>qYsg9GBrez^LP=JXO;UMda0`^;op-A4PIvS>w8s2O(#-7$s@(EU zC~+y6dZJE6mGNtQ!RFYGl-Ydv0T8n%tZntgj%iPym7<&(bX%rR3LcF;1Oun%Iw?0Yk#XWyRQbDDf?3k^pOnHNg?zB&%;WP40K7mUKlvHnRLnSfIsr`yj`T zkgrFyW8#+hpT(>4x^OP#>8+*uHoEW`9;}+7gD+u6YTB*|mJ7BQexgCnfe>bH^Zn#6 z1tM>QW>Nn&FP(+DuE~WL_;SLBFkn7wL#jRsmc&O zEh0%)Ak$vXN)`KlDmmDUx|OO6xcWXry|XRvaTNF7ZJ+rVx)tbhoV5P}^ zORs%_$_EEIvxSXoMbcDL>Ul&zc*1@CgGH02-3MgV*X-cAv*4bb3632>_*K_Pz0exg zt-R<20+-`w;xjmCNMtF2E!T7Fh}4O4k4o4>1u~o!wrIZ-?y^B8ePAhltXeWR2NOn5 zDIol!n;OCzj`@ji%!livl`m`~V|(x}AMXr!`p3=JH64J%oycsjpsF?$Ef7abE!!QY z;YDH&&i&e!)Px#<1nhQ-JdrJ72b3mj1mB_53t#VExmTM84Zg%(Swr-=@ zeO>LY7(aEoJ>ZulFNA1lR=>jru)!7Ecnyi_d9kYprm{j5khAXhfp9e|pJ3}oy9@}^&#Hg7U z5~Ke6ioz&<1#8^!SPALRknFaMnf>}Z-Lb(f?Is6Oe}8nF4N8!9Ro37-)P#_)kyh`}m|EOYS1+}%=6M9fG;puwjs2&YKO^0PANPwb z8V`~s5XY1BT*3wLg&mK{O?d9#k2Kal#fklt<1$}LJMQvbauMuH`rd{Kyf3SB2|Gn< zMpl=YPTy=KwwwycXAQUvWg))P$EjtqnnG%cz5|-zxFD_&ZR;$sB945O z0mJ#XdSwu4jfHY436TI0!m6I-4HrV}-eVBiuP0c{Twem^XWUR)brXD&kDt+@3w}dh_LPiKL1Apb{Z+=&?A~!-TL&xjAIE(fKDD!_r;BkOKDb5 zEL!8{;arhmSQ`9)#QO?YUJibogCIcD0}@>i(6?xDi-@b6|14DR2;MBYa@ow5|EE4V z(9V(^Nc#9}QJ5lyH2l3B2_o@-KZN_B)320!l7%US1zLTNl;xm`y|JDqVICD8<&*mw zzQDokla;J>U6?$GK`zpRmK+{R8*hVEBSP?pUF7jOAD>M;au2&MZyRc{#bUvfPTee+D?>U95-q#6TWhU+QG7|L%AeAEHvxo63rSV!N6B^r~^@9B7b#g92q@}17aiGcb3fphnMoJn>tJJ0JE0?zac zQDe%&gWRk3wHQKF>U?3ucK?EHNRt1uIY|2ak(vOt>fMN}6(=iV5_8FTDHh@(ZkIh8 z0+ae=IVT17L^v``zU&w4`PkFSg~@|nriBb{YM6w8OM_aLrvWKel2T@-u@9+7;&$`6 z<`}!i6j0dyW~)q}M*}?3`a)dhR;Y$OS~)QCfk5-|UD0AI92xeG^glZ-J%6(xOMztk z)*mf)0#4+<8Ep%<6;aw0GGHSCYO5ryV^r!Pt{pp0&Z zyn>DyCmxvt`j2a%g<}Q6_CKZ!I%G%rC+|MU^fc0L+!tmO$CckO16+VY%R3NlMmfKeu!6SqrdFu5ayDxov>DE^|EuDZwyC0+6~VdJ1!_l zx>a^QG+cv}JrF^)4RNi7z4wc*Inzj0ajk`R$b!sCG8B(w78;5zcE%j&n$Y+_47_b- 
zs08uo!ODi~HkXL4bT~tTm1BnjoD4?T$(CmrkUJ;hCO5_ME*VnGEYRanNu`S)&%#MJ zJv_C`%tyj4RNNk&kf5sV&)@ruvOC}j)rwQcgG7ef)*$0&{&8=XdDDmpejY)__Pd9d z`a*6&ku*JVR$2JzHuLsCtdsqI>U-v@Vfywn;vW&LgJnl0VtOci!1Ma2vbI-wl=zff zS}F-CmtQKynm^>%f%4iDvlQ%5+%J)M6EHO2Hss0sg=kbXADu)<-b2wiwmvA_s@g+W zim#6-M?2*hw+3nWq(K&4882#bqt%5HuU{L-fq%t;n1sxf)gG9Z%l?3NK4WN(fxT{G@VVf6@~fr#_`ryS|M zF$bF_zVWr*G5Z?2dBKNR9P_0EF&;C4??NMvg)~f7Unqc$_S#XEe(y{)pJ3a&UyX8R zgiR2E}uQSz-QTPAKW?iYtLVB_^fum z8@9O?Ws7$&fWqRN=*o0B&K#^>pGzvd1X3l0it=NZpEy>$yGEJ5N1~2bT9Imt+4;5& z6(zopwYf{2w3W1dI1l;d9xc~(iXJGFv)+Rl30f9MWc*mBR>w}6=GefAkh4&Vut8LB zpZfS)x6_9-BQ|cjz=U*<$M@0p?m=jRYLd)wGLHJ)oW%e-;X(MsN3EcW%JSRkfOH2Q#K#D zluLRN-oLs`((Go|x7H#fFO>mb(ALBa!oHtF(x1IAt8LSL(6`cIs4k?Y-a1nH1%2(4 zD0~aMqn7o2HBOTRxFBnm+N%v>Tn-!%2bjW&ZO8@g|ma|8j4kH!(Lg$SXIyzErCTRLgWpOU_EoN{BXe@ z2@7XQgmj`Hb6m5hZ1DZ7-0Sf{b#amq(pRZ9?y8A?M4dqav3mA?r*Y$Ofhb0Q*z>i43fR3NE_G4MZqn$|G^-u$Nx>V zBriO*xg6#V-a*4WJO-EWspFScL)pOB4B-tO z^JJ^zX&7Set%hsn&E1D>w?D2Lb!t)!snNK=j3k|)JcRSVxNzpm8X6?j=8n%?NuGsQ zRtrBOnF-`FthU3PKoZLaGtAb{b9BFAu;su#|7_%tX!@(&To+f zMMcFicr0ljKRXBqLK2v_prpSn+K(jI=fb5>F;VGVDW!v+Gx)H+ED7U`Lw5H}BI@U4 zUf79x=IurVb}kp7AkVhS#R%00Dc4UNhK-&uHGy4A@zvK{EXN<+E4pJAG7j`9sLmv|pWv)AXESGRBf5@JIQ`Rw&R*KDd!>DAB@fnZvfodwe3#A^ zQ|gUDT5hR|^IP4=W}NXWtbb_V5gmm1;FO&Bqq-N$kEBcmiVc*T@C{86aY)OEmftqN z+p1je$ssMnqB)!-7HxRl5jOjBq_O^yl)=Qf&YMe@you~eMOM9){8 zQq~%Tw84t|Tb|hX5%bQ*hs@g=K-x~)uTyGz*^$XMsF`ex)d;QDJhFuh`=hok;&{o1 z8gwQ&zTT z@`BPpOW4Q45*OveOsMj2M0Ef(PN*HmI|NWLg!xA^-wTy^gcs%7B-bEEXH?i|WQizq z=s&g#nZ;O*y(&0>*!tM}@w*8)|F+QK2@-GG*WcQasIYLavOjSz^6c2n0j_CfL21*&^xY zLH6AA-R}-!`=yem0^lKv%9MtfZxCMGSnuF;K7$<@KWTNaB?xC+J6EemG|r>kdDndz zKJ!baUQvT0TVKnGtXx=juFy2-!EIIuI&Kj+%yd91}A{xu{uk&;9#Vkkmab^{5!BR9=s zV!tgYd(nX+Vq93p45CtCg>QB~tye ziI;a`{4jLp8;_fn4)at}_KK)E)>SvrrInX%oB?9F_d$!hNv-EdvPw20rEbM}CpItD z7ts}GK*}F|;YUB>E29gf(?km2;zryg4cHY70UytKC2X$<q<7(Row3n&*hnA%Pi^2uk*^>i4fXZ*JD$(^33!r8HkHqw3I19vVZuzmkHvS z2wFAb6PEDn!)^D4OWK29x^(o_@r-yCzaI;m+ys)M1Ye{xn$FO1n}AIX$#GJ5p;A80 z&ZT4_1P9;XzE+${$=%V+dye%BQg){CS-|!VWs9&rTwZoqp9h!nZ}CsV+5R^U=|Rro z+1`MjRIRwX<9INto?C4Azu$?+qGUP%aS1dpTM+S%o>#`I#LM3nr7LB}A1!T6tq%{* zdzdoWZq==}#P5HOCjmq3^iG5CKqEHyB_K~)#ou)f=fT$@TN6rP6k0@9l7eI8x*yr!kpnWKtyY< z2uPCrE4*Gh16-AyLi^5S8$N92EmE}w+yUu}OCvtUk+nD_pW;5r_-m7O6T_PLwLY3Y zIqwBLLPdl>Hr=#9k~S|JBP1!^Ii*~mzvP1pd$Za$@g^NsyyxtQQ%$>a;j>!e>_aGw4QW!&Bage7&VO zQxMFGzM~m6)YFmhR`ZQAm0gfmc=6-W@@!nFNqNkONhNr>oSCeh+ea3aUc36`pY$Cc z$+0l<(MX+l)+QQrdy6UcC$b^@h3>w-IqX#=(b{GYk31LxHYov9^fn_zyUe#2HhvFz zcH$++EJ7;G)^hlj@bz0g&w#IFui55$m(K`a8m_WUo0+IG(A&2aq{{U5$Mz+OPc}$$cuhE&iY47)FLa=S0?2B4>-t~As%wckz*~feb2lY#z02b;R&y5 zyG`P?$uLFkRIOUAF@M|dMY!O+TD2x7!qbFhy8tf&kW=fw zh#SQ`BbL(ssa}@i8|7KkZ30M%D8z@rGz%8xUxH_GeOxR|KMvc75-hT8tQ%({kiZgp z5|gw?beNcr_L2?UQ^9gZ z_|=m1WWP3%tOq?6=pF;|@p53ZM|5hvj=tbPePzZvxP+B5P~s%`^;j(@#XD2KS9MwL zNrN3eTuj3lWb$*6RuCm#oEeUb3Vu^)GV`)}4yDvTWa;W>F9e#dRVj>+4p@zOUq9^i zW`1ExwEA4oE~T*)50Ily%~E!f%MvR=ZDkG>_ZZIgZVPx^cC%h5DM95B#7n$AA~=Uk zLFt_h!;6|wlageFe%rO(`nSIjzl21{l7ZV4TazoWeV*650ue>xgJnnq)De@L zjSYHnw{dp|(|4k4kbBej2#mUJ zZncIZ`G~ybkdM8U5UZ4glkt;ucBDMME01BSrQQX4w*`mbE+<#8bf@_sNfAF6E3HVW z8xeyVncpwp*L)OIci=__i;G8hV36mD^ZXc=A3AsoV{t};=)jK%()l~&Q|-md1?2Y9 z))uo2#>tq9B^lN(-HCRgMx!?&55D>bDjd$7S0fE^d@kK*xBV3&4yfdUv<`sr{)Yql_65C5M;AI|$gM{^{JjJRJ<* zFQ!8p30=!x%@cn&rE_3rLzuo}NZQw^gUvp68?j>R=Sr-RNt3wJkxT|;CQjgXjYbC9 z8V4Inf#0`(YQ6RpdzXv}|ROX^mki*MMSAa@|}f zxG%`4Ze%U~(_ubjUCnX4u8!wFcj}~`OrlB=Ac6-XRfTR zi2o#MJyOI-@@t}drEP^i1vr^z`3)JYgnG8dlsszDCHuG3QfuH&QWFPHv2xVW;;s(< z7B>vB!g&j9D!ij6_mwH#pI*{9qQJiuL(nyPRj@{0S`+FOq4{ujqeK%#iI!WKC=ryr 
[GIT binary patch data omitted]

Date: Mon, 26 Oct 2015 17:39:48 -0700
Subject: [PATCH 887/956] Fix unintended BC issues in ISO option refactoring

---
 builder/qemu/builder.go      | 8 ++++----
 builder/qemu/builder_test.go | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index 568e8c80d..eb38ba0c7 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -21,10 +21,10 @@ import (
 const BuilderId = "transcend.qemu"

 var accels = map[string]struct{}{
-	"none": {},
-	"kvm":  {},
-	"tcg":  {},
-	"xen":  {},
+	"none": struct{}{},
+	"kvm":  struct{}{},
+	"tcg":  struct{}{},
+	"xen":  struct{}{},
 }

 var netDevice = map[string]bool{
diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go
index 69442c01f..d2a1d1eba 100644
--- a/builder/qemu/builder_test.go
+++ b/builder/qemu/builder_test.go
@@ -497,7 +497,7 @@ func TestBuilderPrepare_QemuArgs(t *testing.T) {

 	// Test with a good one
 	config["qemuargs"] = [][]interface{}{
-		{"foo", "bar", "baz"},
+		[]interface{}{"foo", "bar", "baz"},
 	}

 	b = Builder{}
@@ -510,7 +510,7 @@ func TestBuilderPrepare_QemuArgs(t *testing.T) {
 	}

 	expected := [][]string{
-		{"foo", "bar", "baz"},
+		[]string{"foo", "bar", "baz"},
 	}

 	if !reflect.DeepEqual(b.config.QemuArgs, expected) {

From 1c1ccc1191aee24f2da5245ce9b51ac515afd96a Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Tue, 27 Oct 2015 10:12:41 -0400
Subject: [PATCH 888/956] Use Units of measure in file size

Use units of measure, and convert file size to MB with atlas post
processor
---
 post-processor/atlas/post-processor.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go
index 029fcffe6..7e6220c2f 100644
--- a/post-processor/atlas/post-processor.go
+++ b/post-processor/atlas/post-processor.go
@@ -204,7 +204,8 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 		opts.FileSize = r.Size
 	}

-	ui.Message(fmt.Sprintf("Uploading artifact (Size: %v)", opts.FileSize))
+	fileSizeMB := float64(opts.FileSize / 1000000)
+	ui.Message(fmt.Sprintf("Uploading artifact (Size: %.2fMB)", fileSizeMB))
 	var av *atlas.ArtifactVersion
 	doneCh := make(chan struct{})
 	errCh := make(chan error, 1)
@@ -220,7 +221,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac

 	select {
 	case err := <-errCh:
-		return nil, false, fmt.Errorf("Error uploading (Size: %v): %s", opts.FileSize, err)
+		return nil, false, fmt.Errorf("Error uploading (Size: %.2fMB): %s", fileSizeMB, err)
 	case <-doneCh:
 	}

From 803cbde17ead9bcc598a6533ea9014ffa03802d4 Mon Sep 17 00:00:00 2001
From: Chris Bednarski
Date: Tue, 27 Oct 2015 11:57:54 -0700
Subject: [PATCH 889/956] Change output format to bytes

---
 post-processor/atlas/post-processor.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/post-processor/atlas/post-processor.go b/post-processor/atlas/post-processor.go
index 7e6220c2f..b098a9200 100644
--- a/post-processor/atlas/post-processor.go
+++ b/post-processor/atlas/post-processor.go
@@ -204,8 +204,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 		opts.FileSize = r.Size
 	}

-	fileSizeMB := float64(opts.FileSize / 1000000)
-	ui.Message(fmt.Sprintf("Uploading artifact (Size: %.2fMB)", fileSizeMB))
+	ui.Message(fmt.Sprintf("Uploading artifact (%d bytes)", opts.FileSize))
 	var av *atlas.ArtifactVersion
 	doneCh := make(chan struct{})
 	errCh := make(chan error, 1)
@@ -221,7 +220,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac

 	select {
 	case err := <-errCh:
-		return nil, false, fmt.Errorf("Error uploading (Size: %.2fMB): %s", fileSizeMB, err)
+		return nil, false, fmt.Errorf("Error uploading (%d bytes): %s", opts.FileSize, err)
 	case <-doneCh:
 	}

From 9f66dbbde7c54c6e12abaf92a06ca14a164b4d50 Mon Sep 17 00:00:00 2001
From: Timothy Sutton
Date: Tue, 27 Oct 2015 15:47:05 -0400
Subject: [PATCH 890/956] Fixed page_title for Artifice post-processor

---
 website/source/docs/post-processors/artifice.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/post-processors/artifice.html.markdown b/website/source/docs/post-processors/artifice.html.markdown index 2ee9abc85..e9fcd0c3a 100644 --- a/website/source/docs/post-processors/artifice.html.markdown +++ b/website/source/docs/post-processors/artifice.html.markdown @@ -6,7 +6,7 @@ description: | for example, spinning up an EC2 instance to build a docker container -- and then extracting the docker container and throwing away the EC2 instance. layout: docs -page_title: 'Atlas Post-Processor' +page_title: 'Artifice Post-Processor' ... # Artifice Post-Processor From a9df89df155cac3f7fd58626471473b6731e2909 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Tue, 27 Oct 2015 20:45:08 -0700 Subject: [PATCH 891/956] Change amazon.ami to amazon.image. ... to be consistent with https://github.com/hashicorp/terraform/pull/3195. --- website/source/docs/post-processors/atlas.html.markdown | 2 +- .../source/intro/getting-started/remote-builds.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown index 8272ce159..705623131 100644 --- a/website/source/docs/post-processors/atlas.html.markdown +++ b/website/source/docs/post-processors/atlas.html.markdown @@ -107,7 +107,7 @@ you can also use `token` configuration option. "type": "atlas", "token": "{{user `atlas_token`}}", "artifact": "hashicorp/foobar", - "artifact_type": "amazon.ami", + "artifact_type": "amazon.image", "metadata": { "created_at": "{{timestamp}}" } diff --git a/website/source/intro/getting-started/remote-builds.html.markdown b/website/source/intro/getting-started/remote-builds.html.markdown index 5dba242c4..347edc30f 100644 --- a/website/source/intro/getting-started/remote-builds.html.markdown +++ b/website/source/intro/getting-started/remote-builds.html.markdown @@ -99,7 +99,7 @@ deployed by a tool like [Terraform](https://terraform.io). The `atlas` "post-processors": [{ "type": "atlas", "artifact": "ATLAS_USERNAME/packer-tutorial", - "artifact_type": "amazon.ami" + "artifact_type": "amazon.image" }] } ``` From 6af2fd5bd08826e8e1a9d78afc017853db58150b Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 28 Oct 2015 16:10:27 -0400 Subject: [PATCH 892/956] Update Packer Debug Docs for Cloud-Init Issue Update documentation to make official note of the cloud-init issue with ubuntu AMIs. --- .../source/docs/other/debugging.html.markdown | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/website/source/docs/other/debugging.html.markdown b/website/source/docs/other/debugging.html.markdown index 2ca5ab7d1..33a515091 100644 --- a/website/source/docs/other/debugging.html.markdown +++ b/website/source/docs/other/debugging.html.markdown @@ -58,10 +58,36 @@ any logging to be enabled. ### Debugging Packer in Powershell/Windows In Windows you can set the detailed logs environmental variable `PACKER_LOG` or -the log variable `PACKER_LOG_PATH` using powershell environment variables. For example: +the log variable `PACKER_LOG_PATH` using powershell environment variables. For +example: $env:PACKER_LOG=1 $env:PACKER_LOG_PATH="packerlog.txt" If you find a bug with Packer, please include the detailed log by using a service such as [gist](http://gist.github.com). 
+
+## Issues Installing Ubuntu Packages
+
+Issues may arise when using and building Ubuntu AMIs where common packages that
+*should* be installed from Ubuntu's Main repository are not found during a
+provisioner step:
+
+    amazon-ebs: No candidate version found for build-essential
+    amazon-ebs: No candidate version found for build-essential
+
+This, obviously, can cause problems where a build is unable to finish
+successfully as the proper packages cannot be provisioned correctly. The problem
+arises when cloud-init has not finished fully running on the source AMI by the
+time that packer starts any provisioning steps.
+
+Adding the following provisioner to the packer template allows the
+cloud-init process to fully finish before packer starts provisioning the source
+AMI.
+
+    {
+      "type": "shell",
+      "inline": [
+        "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done"
+      ]
+    }

From 8e1cc16ab5c8a0d3c926dc8b7feaf73e4b6769b8 Mon Sep 17 00:00:00 2001
From: Vasiliy Tolstov
Date: Thu, 29 Oct 2015 10:54:25 +0000
Subject: [PATCH 893/956] add convert step for qcow2 image format

https://ext4.wiki.kernel.org/index.php/Ext4_VM_Images does not
recommend zeroing a file with dd and then deleting it, but when
discards are enabled on a qcow2 image we can recreate the qcow2
file so that it uses less space. qemu-img is also able to enable
compression for qcow2 files, which may sometimes be useful because
compression is natively supported by qemu.

Signed-off-by: Vasiliy Tolstov
---
 builder/qemu/builder.go                     |  8 +++
 builder/qemu/step_convert_disk.go           | 67 +++++++++++++++++++
 .../source/docs/builders/qemu.html.markdown |  6 ++
 3 files changed, 81 insertions(+)
 create mode 100644 builder/qemu/step_convert_disk.go

diff --git a/builder/qemu/builder.go b/builder/qemu/builder.go
index eb38ba0c7..2e7e107d7 100644
--- a/builder/qemu/builder.go
+++ b/builder/qemu/builder.go
@@ -88,6 +88,8 @@ type Config struct {
 	DiskSize         uint     `mapstructure:"disk_size"`
 	DiskCache        string   `mapstructure:"disk_cache"`
 	DiskDiscard      string   `mapstructure:"disk_discard"`
+	SkipCompaction   bool     `mapstructure:"skip_compaction"`
+	DiskCompression  bool     `mapstructure:"disk_compression"`
 	FloppyFiles      []string `mapstructure:"floppy_files"`
 	Format           string   `mapstructure:"format"`
 	Headless         bool     `mapstructure:"headless"`
@@ -242,6 +244,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 			errs, errors.New("invalid format, only 'qcow2' or 'raw' are allowed"))
 	}

+	if b.config.Format != "qcow2" {
+		b.config.SkipCompaction = true
+		b.config.DiskCompression = false
+	}
+
 	if _, ok := accels[b.config.Accelerator]; !ok {
 		errs = packer.MultiErrorAppend(
 			errs, errors.New("invalid accelerator, only 'kvm', 'tcg', 'xen', or 'none' are allowed"))
@@ -364,6 +371,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		},
 		new(common.StepProvision),
 		new(stepShutdown),
+		new(stepConvertDisk),
 	}

 	// Setup the state bag
diff --git a/builder/qemu/step_convert_disk.go b/builder/qemu/step_convert_disk.go
new file mode 100644
index 000000000..dcf5e97de
--- /dev/null
+++ b/builder/qemu/step_convert_disk.go
@@ -0,0 +1,67 @@
+package qemu
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+
+	"os"
+)
+
+// This step converts the virtual disk that was used as the
+// hard drive for the virtual machine.
+type stepConvertDisk struct{} + +func (s *stepConvertDisk) Run(state multistep.StateBag) multistep.StepAction { + config := state.Get("config").(*Config) + driver := state.Get("driver").(Driver) + diskName := state.Get("disk_filename").(string) + ui := state.Get("ui").(packer.Ui) + + if config.SkipCompaction && !config.DiskCompression { + return multistep.ActionContinue + } + + name := diskName + ".convert" + + sourcePath := filepath.Join(config.OutputDir, diskName) + targetPath := filepath.Join(config.OutputDir, name) + + command := []string{ + "convert", + "-q", + } + + if config.DiskCompression { + command = append(command, "-c") + } + + command = append(command, []string{ + "-f", config.Format, + "-O", config.Format, + sourcePath, + targetPath, + }..., + ) + + ui.Say("Converting hard drive...") + if err := driver.QemuImg(command...); err != nil { + err := fmt.Errorf("Error converting hard drive: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + if err := os.Rename(targetPath, sourcePath); err != nil { + err := fmt.Errorf("Error moving converted hard drive: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *stepConvertDisk) Cleanup(state multistep.StateBag) {} diff --git a/website/source/docs/builders/qemu.html.markdown b/website/source/docs/builders/qemu.html.markdown index 13ad4bda2..5cfc767d3 100644 --- a/website/source/docs/builders/qemu.html.markdown +++ b/website/source/docs/builders/qemu.html.markdown @@ -136,6 +136,12 @@ builder. - `disk_size` (integer) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40000 (about 40 GB). +- `skip_compaction` (boolean) - Packer compacts the QCOW2 image using `qemu-img convert`. + Set this option to `true` to disable compacting. Defaults to `false`. + +- `disk_compression` (boolean) - Apply compression to the QCOW2 disk file + using `qemu-img convert`. Defaults to `false`. + - `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on From 82126d01e6b0485ca4d03567d782c6bd505747df Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 29 Oct 2015 16:24:22 -0400 Subject: [PATCH 894/956] Trailing slash --- website/source/downloads.html.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb index 47136437b..112d550f0 100644 --- a/website/source/downloads.html.erb +++ b/website/source/downloads.html.erb @@ -27,7 +27,7 @@ page_title: "Downloads" verify the checksums signature file which has been signed using HashiCorp's GPG key. - You can also download older versions of Packer from the releases service. + You can also download older versions of Packer from the releases service.

    From 0e771e62b62bef816d4f5476007d9ced04a01a83 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Thu, 29 Oct 2015 17:08:31 -0700 Subject: [PATCH 895/956] Updated changelog to note changes to the plugin system --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4bc93c7f4..87bec68cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +## 0.9.0 (Unreleased) + +BACKWARDS INCOMPATIBILITIES: + + * Packer now ships as a single binary, including plugins. If you install packer 0.9.0 over a previous packer installation, **you must delete all of the packer-* plugin files** or packer will load out-of-date plugins from disk. + +FEATURES: + + * + +IMPROVEMENTS: + + * Packer plugins are now compiled into the main binary, reducing file size and build times, and making packer easier to install. The overall plugin architecture has not changed and third-party plugins can still be loaded from disk. Please make sure your plugins are up-to-date! + +BUG FIXES: + + * + ## 0.8.6 (Aug 22, 2015) IMPROVEMENTS: From 8682dec178221c2a66d7b3518bc81ff58e44f3d3 Mon Sep 17 00:00:00 2001 From: Luke Amdor Date: Fri, 30 Oct 2015 13:58:56 -0500 Subject: [PATCH 896/956] aws: build after upstream breaking change see https://github.com/aws/aws-sdk-go/commit/1a69d069352edadd48f12b0f2c5f375de4a636d7 --- builder/amazon/chroot/builder.go | 4 +++- builder/amazon/common/artifact.go | 4 +++- builder/amazon/common/step_ami_region_copy.go | 5 ++++- builder/amazon/common/step_create_tags.go | 9 ++++++--- builder/amazon/common/step_modify_ami_attributes.go | 7 +++++-- builder/amazon/ebs/builder.go | 4 +++- builder/amazon/instance/builder.go | 4 +++- 7 files changed, 27 insertions(+), 10 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 636de8df7..637b1e40f 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -9,6 +9,7 @@ import ( "log" "runtime" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -131,7 +132,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - ec2conn := ec2.New(config) + session := session.New(config) + ec2conn := ec2.New(session) wrappedCommand := func(command string) (string, error) { ctx := b.config.ctx diff --git a/builder/amazon/common/artifact.go b/builder/amazon/common/artifact.go index 8eed0134d..76f4e03e0 100644 --- a/builder/amazon/common/artifact.go +++ b/builder/amazon/common/artifact.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/packer" ) @@ -72,7 +73,8 @@ func (a *Artifact) Destroy() error { Credentials: a.Conn.Config.Credentials, Region: aws.String(region), } - regionConn := ec2.New(regionConfig) + sess := session.New(regionConfig) + regionConn := ec2.New(sess) input := &ec2.DeregisterImageInput{ ImageId: &imageId, diff --git a/builder/amazon/common/step_ami_region_copy.go b/builder/amazon/common/step_ami_region_copy.go index 0cf4d40fa..fa955ac7b 100644 --- a/builder/amazon/common/step_ami_region_copy.go +++ b/builder/amazon/common/step_ami_region_copy.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" 
@@ -87,7 +88,9 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, } awsConfig.Region = aws.String(target) - regionconn := ec2.New(awsConfig) + sess := session.New(awsConfig) + regionconn := ec2.New(sess) + resp, err := regionconn.CopyImage(&ec2.CopyImageInput{ SourceRegion: &source, SourceImageId: &imageId, diff --git a/builder/amazon/common/step_create_tags.go b/builder/amazon/common/step_create_tags.go index 7f62d2657..2ac39dc02 100644 --- a/builder/amazon/common/step_create_tags.go +++ b/builder/amazon/common/step_create_tags.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -33,11 +34,13 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction { // Declare list of resources to tag resourceIds := []*string{&ami} - - regionconn := ec2.New(&aws.Config{ + awsConfig := aws.Config{ Credentials: ec2conn.Config.Credentials, Region: aws.String(region), - }) + } + session := session.New(&awsConfig) + + regionconn := ec2.New(session) // Retrieve image list for given AMI imageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{ diff --git a/builder/amazon/common/step_modify_ami_attributes.go b/builder/amazon/common/step_modify_ami_attributes.go index e8e7de589..31d60a392 100644 --- a/builder/amazon/common/step_modify_ami_attributes.go +++ b/builder/amazon/common/step_modify_ami_attributes.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -88,10 +89,12 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc for region, ami := range amis { ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami)) - regionconn := ec2.New(&aws.Config{ + awsConfig := aws.Config{ Credentials: ec2conn.Config.Credentials, Region: aws.String(region), - }) + } + session := session.New(&awsConfig) + regionconn := ec2.New(session) for name, input := range options { ui.Message(fmt.Sprintf("Modifying: %s", name)) input.ImageId = &ami diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 26a525bcb..b9277a5ab 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -9,6 +9,7 @@ import ( "fmt" "log" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -68,7 +69,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - ec2conn := ec2.New(config) + session := session.New(config) + ec2conn := ec2.New(session) // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 6ca908230..6b1726efb 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -9,6 +9,7 @@ import ( "os" "strings" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/multistep" awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -159,7 +160,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - ec2conn := ec2.New(config) + session := 
session.New(config) + ec2conn := ec2.New(session) // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) From 62bcf6c7ce8a69181b07efa104a93ccf10f944ed Mon Sep 17 00:00:00 2001 From: Jason Martin Date: Fri, 30 Oct 2015 13:19:44 -0700 Subject: [PATCH 897/956] Add IAM policy for copying images The ec2:CopyImage privilege is required in order to make a cross-region AMI when using the `ami_regions` option for the amazon-ebs builder. --- website/source/docs/builders/amazon.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/builders/amazon.html.markdown b/website/source/docs/builders/amazon.html.markdown index a85e22d1a..ae9c5fcb8 100644 --- a/website/source/docs/builders/amazon.html.markdown +++ b/website/source/docs/builders/amazon.html.markdown @@ -101,6 +101,7 @@ Packer to work: "ec2:DeleteSecurityGroup", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateImage", + "ec2:CopyImage", "ec2:RunInstances", "ec2:TerminateInstances", "ec2:StopInstances", From 07079a5905ee6dde84fa726fc4c3e116a37a2bf1 Mon Sep 17 00:00:00 2001 From: Yuya Kusakabe Date: Sat, 31 Oct 2015 19:32:40 +0900 Subject: [PATCH 898/956] Fix #2892 --- builder/vmware/iso/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index a9f2ae9b8..7958b5760 100755 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -40,7 +40,7 @@ type Config struct { DiskSize uint `mapstructure:"disk_size"` DiskTypeId string `mapstructure:"disk_type_id"` FloppyFiles []string `mapstructure:"floppy_files"` - Format string `mapstruture:"format"` + Format string `mapstructure:"format"` GuestOSType string `mapstructure:"guest_os_type"` Version string `mapstructure:"version"` VMName string `mapstructure:"vm_name"` From ca19688316b63cb9c4a449b94bd4dbc369d5d177 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sat, 31 Oct 2015 11:04:50 -0700 Subject: [PATCH 899/956] aws: fix test breakage due to upstream breaking change #2891 --- builder/amazon/ebs/builder_acc_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builder/amazon/ebs/builder_acc_test.go b/builder/amazon/ebs/builder_acc_test.go index 890b3228a..47da06c10 100644 --- a/builder/amazon/ebs/builder_acc_test.go +++ b/builder/amazon/ebs/builder_acc_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/mitchellh/packer/builder/amazon/common" builderT "github.com/mitchellh/packer/helper/builder/testing" @@ -160,7 +161,8 @@ func testEC2Conn() (*ec2.EC2, error) { return nil, err } - return ec2.New(config), nil + session := session.New(config) + return ec2.New(session), nil } const testBuilderAccBasic = ` From 31dd989e2efef42a30a1f2f8ea6c0364f3805adc Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Sat, 31 Oct 2015 18:15:19 -0700 Subject: [PATCH 900/956] Add qcow2 shrink/compress tests for #2748 --- builder/qemu/builder_test.go | 42 ++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/builder/qemu/builder_test.go b/builder/qemu/builder_test.go index d2a1d1eba..f885c570b 100644 --- a/builder/qemu/builder_test.go +++ b/builder/qemu/builder_test.go @@ -132,6 +132,48 @@ func TestBuilderPrepare_BootWait(t *testing.T) { } } +func TestBuilderPrepare_DiskCompaction(t *testing.T) { + var b Builder + config := testConfig() + + // Bad + config["skip_compaction"] = false + 
config["disk_compression"] = true + config["format"] = "img" + warns, err := b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err == nil { + t.Fatal("should have error") + } + if b.config.SkipCompaction != true { + t.Fatalf("SkipCompaction should be true") + } + if b.config.DiskCompression != false { + t.Fatalf("DiskCompression should be false") + } + + // Good + config["skip_compaction"] = false + config["disk_compression"] = true + config["format"] = "qcow2" + b = Builder{} + warns, err = b.Prepare(config) + if len(warns) > 0 { + t.Fatalf("bad: %#v", warns) + } + if err != nil { + t.Fatalf("should not have error: %s", err) + } + if b.config.SkipCompaction != false { + t.Fatalf("SkipCompaction should be false") + } + if b.config.DiskCompression != true { + t.Fatalf("DiskCompression should be true") + } +} + func TestBuilderPrepare_DiskSize(t *testing.T) { var b Builder config := testConfig() From 82893590db507166c72fcc524dd192d8cd553a15 Mon Sep 17 00:00:00 2001 From: Vasiliy Tolstov Date: Mon, 2 Nov 2015 11:21:15 +0000 Subject: [PATCH 901/956] docker-import: allow artifice artifacts Signed-off-by: Vasiliy Tolstov --- post-processor/docker-import/post-processor.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/post-processor/docker-import/post-processor.go b/post-processor/docker-import/post-processor.go index 8074d1a02..38c73ab55 100644 --- a/post-processor/docker-import/post-processor.go +++ b/post-processor/docker-import/post-processor.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/mitchellh/packer/builder/docker" - "github.com/mitchellh/packer/builder/qemu" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/post-processor/artifice" "github.com/mitchellh/packer/template/interpolate" ) @@ -44,11 +44,11 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { switch artifact.BuilderId() { - case docker.BuilderId, qemu.BuilderId: + case docker.BuilderId, artifice.BuilderId: break default: err := fmt.Errorf( - "Unknown artifact type: %s\nCan only import from Docker, Qemu builder artifacts.", + "Unknown artifact type: %s\nCan only import from Docker builder and Artifice post-processor artifacts.", artifact.BuilderId()) return nil, false, err } From b34525358d21a2f32cce0bdf76e446c75bde9388 Mon Sep 17 00:00:00 2001 From: Sergio Rodriguez Date: Mon, 2 Nov 2015 16:35:07 -0500 Subject: [PATCH 902/956] add "disable_sudo" configuration reference --- website/source/docs/provisioners/salt-masterless.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/provisioners/salt-masterless.html.markdown b/website/source/docs/provisioners/salt-masterless.html.markdown index adb1c4bb3..7e69f2351 100644 --- a/website/source/docs/provisioners/salt-masterless.html.markdown +++ b/website/source/docs/provisioners/salt-masterless.html.markdown @@ -38,6 +38,9 @@ Optional: has more detailed usage instructions. By default, no arguments are sent to the script. +- `disable_sudo` (boolean) - By default, the bootstrap install command is prefixed with `sudo`. When using a + Docker builder, you will likely want to pass `true` since `sudo` is often not pre-installed. + - `remote_pillar_roots` (string) - The path to your remote [pillar roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration). 
default: `/srv/pillar`. From 61c98bb701d528dd4232f310d7638619e5ba2ba8 Mon Sep 17 00:00:00 2001 From: Barrie Bremner Date: Tue, 3 Nov 2015 14:48:21 +0000 Subject: [PATCH 903/956] Doc change only: misspelling of 'termination' --- website/source/docs/builders/amazon-ebs.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index effa07be2..860040ee7 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -278,7 +278,7 @@ Here is an example using the optional AMI tags. This will add the tags -> **Note:** Packer uses pre-built AMIs as the source for building images. These source AMIs may include volumes that are not flagged to be destroyed on -termiation of the instance building the new image. Packer will attempt to clean +termination of the instance building the new image. Packer will attempt to clean up all residual volumes that are not designated by the user to remain after termination. If you need to preserve those source volumes, you can overwrite the termination setting by specifying `delete_on_termination=false` in the From ebed9e53fb9336f2c7dfe5fcb6d42d4f3a12fb55 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 12:30:55 -0500 Subject: [PATCH 904/956] Adding new "Options" configuration parameter for the puppet-masterless provisioner, to allow for specifying additional options to pass to the execute command --- provisioner/puppet-masterless/provisioner.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 546224a54..b3dc5117f 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -22,6 +22,9 @@ type Config struct { // The command used to execute Puppet. 
ExecuteCommand string `mapstructure:"execute_command"` + // Additional options to pass when executing Puppet + Options []string + // Additional facts to set when executing Puppet Facter map[string]string @@ -62,6 +65,7 @@ type ExecuteTemplate struct { ManifestFile string ManifestDir string Sudo bool + Options string } func (p *Provisioner) Prepare(raws ...interface{}) error { @@ -86,6 +90,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + "--detailed-exitcodes " + + "{{.Options}} " + "{{.ManifestFile}}" } @@ -218,6 +223,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { ModulePath: strings.Join(modulePaths, ":"), Sudo: !p.config.PreventSudo, WorkingDir: p.config.WorkingDir, + Options: strings.Join(p.config.Options, " "), } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { From e41f0bb9f56c26e4ab221df43c7fabd8424dd8fb Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 13:55:40 -0500 Subject: [PATCH 905/956] Adding documentation for the new configuration parameter --- .../docs/provisioners/puppet-masterless.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 7ef13265e..c283c7a46 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -59,6 +59,12 @@ Optional parameters: variables](/docs/templates/configuration-templates.html) available. See below for more information. +- `options` (array of strings) - This is an array of additional options to + pass to the puppet command when executing puppet. This allows for + customization of the `execute_command` without having to completely replace + or include it's contents, making forward-compatible customizations much + easier. + - `facter` (object of key/value strings) - Additional [facts](http://puppetlabs.com/puppet/related-projects/facter) to make available when Puppet is running. 
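For illustration, a minimal template stanza using the new parameter might look
like the following sketch (the manifest path and the puppet flags are example
values only, not something the provisioner requires):

``` {.javascript}
{
  "type": "puppet-masterless",
  "manifest_file": "site.pp",
  "options": [
    "--verbose",
    "--show_diff"
  ]
}
```

With the default `execute_command` template shown in the provisioner patch
above, these values would render into the final command line as
`... --detailed-exitcodes --verbose --show_diff site.pp`.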
From 84e1b387c4c5adb69dafe5f949150ad7cc0ed593 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 14:36:04 -0500 Subject: [PATCH 906/956] New test for preparing the new config parameter --- .../puppet-masterless/provisioner_test.go | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 42ddd9d7a..5416447a2 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -1,10 +1,11 @@ package puppetmasterless import ( - "github.com/mitchellh/packer/packer" "io/ioutil" "os" "testing" + + "github.com/mitchellh/packer/packer" ) func testConfig() map[string]interface{} { @@ -177,3 +178,32 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { t.Fatalf("err: Default facts are not set in the Puppet provisioner!") } } + +func TestProvisionerPrepare_options(t *testing.T) { + config := testConfig() + + delete(config, "options") + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test with malformed fact + config["options"] = "{{}}" + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should be an error") + } + + config["options"] = []string{ + "arg", + } + + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } +} From 4ea7e3473ddcb95930097d9335a3f391ed645167 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 14:59:55 -0500 Subject: [PATCH 907/956] Testing the new options argument during the actual call to `Provision()` --- .../puppet-masterless/provisioner_test.go | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 5416447a2..10048551c 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -3,6 +3,7 @@ package puppetmasterless import ( "io/ioutil" "os" + "strings" "testing" "github.com/mitchellh/packer/packer" @@ -207,3 +208,34 @@ func TestProvisionerPrepare_options(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerProvision_options(t *testing.T) { + config := testConfig() + ui := &packer.MachineReadableUi{ + Writer: ioutil.Discard, + } + comm := new(packer.MockCommunicator) + + options := []string{ + "--some-arg=yup", + "--some-other-arg", + } + config["options"] = options + + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = p.Provision(ui, comm) + if err != nil { + t.Fatalf("err: %s", err) + } + + expectedArgs := strings.Join(options, " ") + + if !strings.Contains(comm.StartCmd.Command, expectedArgs) { + t.Fatalf("Command %q doesn't contain the expected arguments %q", comm.StartCmd.Command, expectedArgs) + } +} From 627a8fe819cfd0db9bd2072465e719283de20774 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 17:55:03 -0500 Subject: [PATCH 908/956] Renaming the config parameter from "options" to "extra_arguments" --- provisioner/puppet-masterless/provisioner.go | 10 +++++----- .../puppet-masterless/provisioner_test.go | 16 ++++++++-------- .../provisioners/puppet-masterless.html.markdown | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 
b3dc5117f..4e35d1a94 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -22,8 +22,8 @@ type Config struct { // The command used to execute Puppet. ExecuteCommand string `mapstructure:"execute_command"` - // Additional options to pass when executing Puppet - Options []string + // Additional arguments to pass when executing Puppet + ExtraArguments []string `mapstructure:"extra_arguments"` // Additional facts to set when executing Puppet Facter map[string]string @@ -65,7 +65,7 @@ type ExecuteTemplate struct { ManifestFile string ManifestDir string Sudo bool - Options string + ExtraArguments string } func (p *Provisioner) Prepare(raws ...interface{}) error { @@ -90,7 +90,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + "--detailed-exitcodes " + - "{{.Options}} " + + "{{.ExtraArguments}} " + "{{.ManifestFile}}" } @@ -223,7 +223,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { ModulePath: strings.Join(modulePaths, ":"), Sudo: !p.config.PreventSudo, WorkingDir: p.config.WorkingDir, - Options: strings.Join(p.config.Options, " "), + ExtraArguments: strings.Join(p.config.ExtraArguments, " "), } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 10048551c..9355897b4 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -180,10 +180,10 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { } } -func TestProvisionerPrepare_options(t *testing.T) { +func TestProvisionerPrepare_extraArguments(t *testing.T) { config := testConfig() - delete(config, "options") + delete(config, "extra_arguments") p := new(Provisioner) err := p.Prepare(config) if err != nil { @@ -191,14 +191,14 @@ func TestProvisionerPrepare_options(t *testing.T) { } // Test with malformed fact - config["options"] = "{{}}" + config["extra_arguments"] = "{{}}" p = new(Provisioner) err = p.Prepare(config) if err == nil { t.Fatal("should be an error") } - config["options"] = []string{ + config["extra_arguments"] = []string{ "arg", } @@ -209,18 +209,18 @@ func TestProvisionerPrepare_options(t *testing.T) { } } -func TestProvisionerProvision_options(t *testing.T) { +func TestProvisionerProvision_extraArguments(t *testing.T) { config := testConfig() ui := &packer.MachineReadableUi{ Writer: ioutil.Discard, } comm := new(packer.MockCommunicator) - options := []string{ + extraArguments := []string{ "--some-arg=yup", "--some-other-arg", } - config["options"] = options + config["extra_arguments"] = extraArguments p := new(Provisioner) err := p.Prepare(config) @@ -233,7 +233,7 @@ func TestProvisionerProvision_options(t *testing.T) { t.Fatalf("err: %s", err) } - expectedArgs := strings.Join(options, " ") + expectedArgs := strings.Join(extraArguments, " ") if !strings.Contains(comm.StartCmd.Command, expectedArgs) { t.Fatalf("Command %q doesn't contain the expected arguments %q", comm.StartCmd.Command, expectedArgs) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index c283c7a46..7995a0ee2 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ 
b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -59,7 +59,7 @@ Optional parameters: variables](/docs/templates/configuration-templates.html) available. See below for more information. -- `options` (array of strings) - This is an array of additional options to +- `extra_arguments` (array of strings) - This is an array of additional options to pass to the puppet command when executing puppet. This allows for customization of the `execute_command` without having to completely replace or include it's contents, making forward-compatible customizations much From 6ca02286d4875c2713579d411c94822eab485fa0 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 18:18:24 -0500 Subject: [PATCH 909/956] Test for when the config parameter isn't passed --- .../puppet-masterless/provisioner_test.go | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 9355897b4..b2cc3adb9 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -183,6 +183,7 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { func TestProvisionerPrepare_extraArguments(t *testing.T) { config := testConfig() + // Test with missing parameter delete(config, "extra_arguments") p := new(Provisioner) err := p.Prepare(config) @@ -190,7 +191,7 @@ func TestProvisionerPrepare_extraArguments(t *testing.T) { t.Fatalf("err: %s", err) } - // Test with malformed fact + // Test with malformed value config["extra_arguments"] = "{{}}" p = new(Provisioner) err = p.Prepare(config) @@ -198,6 +199,7 @@ func TestProvisionerPrepare_extraArguments(t *testing.T) { t.Fatal("should be an error") } + // Test with valid values config["extra_arguments"] = []string{ "arg", } @@ -222,6 +224,7 @@ func TestProvisionerProvision_extraArguments(t *testing.T) { } config["extra_arguments"] = extraArguments + // Test with valid values p := new(Provisioner) err := p.Prepare(config) if err != nil { @@ -238,4 +241,24 @@ func TestProvisionerProvision_extraArguments(t *testing.T) { if !strings.Contains(comm.StartCmd.Command, expectedArgs) { t.Fatalf("Command %q doesn't contain the expected arguments %q", comm.StartCmd.Command, expectedArgs) } + + // Test with missing parameter + delete(config, "extra_arguments") + + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = p.Provision(ui, comm) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Check the expected `extra_arguments` position for an empty value + splitCommand := strings.Split(comm.StartCmd.Command, " ") + if "" == splitCommand[len(splitCommand)-2] { + t.Fatalf("Command %q contains an extra-space which may cause arg parsing issues", comm.StartCmd.Command) + } } From f006a83c95bde7c3c4ec77609ca0acef513d24b0 Mon Sep 17 00:00:00 2001 From: Trevor Suarez Date: Tue, 3 Nov 2015 18:19:03 -0500 Subject: [PATCH 910/956] Fixing the bug found in the tests --- provisioner/puppet-masterless/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 4e35d1a94..6eaac474b 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -90,7 +90,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' 
{{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + "--detailed-exitcodes " + - "{{.ExtraArguments}} " + + "{{if ne .ExtraArguments \"\"}}{{.ExtraArguments}} {{end}}" + "{{.ManifestFile}}" } From 38612d45a923c6f276524cfe6d8164668b79edf5 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Wed, 4 Nov 2015 15:29:26 +0100 Subject: [PATCH 911/956] Make all scripts portable regardless of where bash is installed. --- scripts/build.sh | 2 +- scripts/dist.sh | 2 +- scripts/upload.sh | 2 +- scripts/website_push.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/build.sh b/scripts/build.sh index dcd9bd7c8..2b5048ecb 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # This script builds the application from source for multiple platforms. set -e diff --git a/scripts/dist.sh b/scripts/dist.sh index 9533ef285..ba5e1b2c0 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # Get the parent directory of where this script is. diff --git a/scripts/upload.sh b/scripts/upload.sh index 9854a3b0e..284724bab 100755 --- a/scripts/upload.sh +++ b/scripts/upload.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # Get the parent directory of where this script is. diff --git a/scripts/website_push.sh b/scripts/website_push.sh index 95168f977..a51c9c9b1 100755 --- a/scripts/website_push.sh +++ b/scripts/website_push.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Set the tmpdir if [ -z "$TMPDIR" ]; then From 1c7a8553021c6facc63cbc5e69f84c5ebe060382 Mon Sep 17 00:00:00 2001 From: Mark Peek Date: Wed, 4 Nov 2015 12:36:00 -0800 Subject: [PATCH 912/956] Switch osext package from mitchellh -> kardianos #2842 --- config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.go b/config.go index 3a4c41ec9..07a64d0af 100644 --- a/config.go +++ b/config.go @@ -10,7 +10,7 @@ import ( "runtime" "strings" - "github.com/mitchellh/osext" + "github.com/kardianos/osext" "github.com/mitchellh/packer/command" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer/plugin" From 6cf95b519d85265291822967609c6a417fe05c79 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Wed, 4 Nov 2015 17:11:46 -0800 Subject: [PATCH 913/956] Updated changelog for current work towards 0.9 --- CHANGELOG.md | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87bec68cf..6399411ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,18 +3,49 @@ BACKWARDS INCOMPATIBILITIES: * Packer now ships as a single binary, including plugins. If you install packer 0.9.0 over a previous packer installation, **you must delete all of the packer-* plugin files** or packer will load out-of-date plugins from disk. + * Release binaries are now provided via . + * Packer 0.9.0 is now built with Go 1.5. Future versions will drop support for building with Go 1.4. FEATURES: - * + * **Artifice post-processor**: Override packer artifacts during post- + processing. This allows you to extract artifacts from a packer builder + and use them with other post-processors like compress, docker, and Atlas. + * **New `vmware-esxi` feature**: Packer can now export images from vCloud or vSphere during the build. [GH-1921] IMPROVEMENTS: - * Packer plugins are now compiled into the main binary, reducing file size and build times, and making packer easier to install. 
The overall plugin architecture has not changed and third-party plugins can still be loaded from disk. Please make sure your plugins are up-to-date! + * core: Packer plugins are now compiled into the main binary, reducing file size and build times, and making packer easier to install. The overall plugin architecture has not changed and third-party plugins can still be loaded from disk. Please make sure your plugins are up-to-date! [GH-2854] + * core: Packer now indicates line numbers for template parse errors [GH-2742] + * core: Scripts are executed via `/usr/bin/env bash` instead of `/bin/bash` for broader compatibility. [GH-2913] + * core: `target_path` for builder downloads can now be specified. [GH-2600] + * builder/amazon: Add support for `ebs_optimized` [GH-2806] + * builder/amazon: You can now specify `0` for `spot_price` to switch to on demand instances [GH-2845] + * builder/google: `account_file` can now be provided as a JSON string [GH-2811] + * builder/parallels: Improve support for Parallels 11 [GH-2662] + * builder/parallels: Parallels disks are now compacted by default [GH-2731] + * builder/parallels: Packer will look for Parallels in `/Applications/Parallels Desktop.app` if it is not detected automatically [GH-2839] + * builder/docker: Now works remote hosts, such as boot2docker [GH-2846] + * builder/qemu: qcow2 images are now compacted by default [GH-2748] + * builder/qemu: qcow2 images can now be compressed [GH-2748] + * builder/qemu: Now specifies `virtio-scsi` by default [GH-2422] + * builder/qemu: Now checks for version-specific options [GH-2376] + * provisioner/puppet: Now accepts the `extra_arguments` parameter [GH-2635] + * post-processor/atlas: Added support for compile ID. [GH-2775] BUG FIXES: - * + * core: Random number generator is now seeded. [GH-2640] + * core: Packer should now have a lot less race conditions [GH-2824] + * builder/amazon: The `no_device` option for block device mappings is now handled correctly [GH-2398] + * builder/amazon: AMI name validation now matches Amazon's spec [GH-2774] + * builder/amazon: Use snapshot size when volume size is unspecified [GH-2480] + * builder/parallels: Now supports interpolation in `prlctl_post` [GH-2828] + * builder/vmware: `format` option is now read correctly [GH-2892] + * provisioner/shell: No longer leaves temp scripts behind [GH-1536] + * provisioner/winrm: Now waits for reboot to complete before continuing with provisioning [GH-2568] + * post-processor/artifice: Fix truncation of files downloaded from Docker. [GH-2793] + ## 0.8.6 (Aug 22, 2015) From d36b653d3fc56157a9f3e42de71bae008762363e Mon Sep 17 00:00:00 2001 From: Andy Williams Date: Tue, 6 Oct 2015 10:52:47 -0400 Subject: [PATCH 914/956] Make DigitalOcean artifact ID match AWS format The Vagrant post processor expects the DO artifact ID to look like an AWS artifact ID (region_id:snapshot_id). This commit makes the DO artifact Id() function output this format. 
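For illustration, a self-contained sketch of the resulting ID format (the
stand-in type below only mirrors the fields and Id() logic shown in the diff;
it is not the builder's actual Artifact implementation):

```go
package main

import (
	"fmt"
	"strconv"
)

// artifact is a minimal stand-in for the digitalocean builder's Artifact.
type artifact struct {
	snapshotId uint
	regionName string
}

// Id mirrors the new behavior: "region:snapshot_id", matching the AWS format.
func (a *artifact) Id() string {
	return fmt.Sprintf("%s:%s", a.regionName, strconv.FormatUint(uint64(a.snapshotId), 10))
}

func main() {
	a := &artifact{snapshotId: 42, regionName: "San Francisco"}
	fmt.Println(a.Id()) // "San Francisco:42" -- previously just "42"
}
```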
--- builder/digitalocean/artifact.go | 2 +- builder/digitalocean/artifact_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/digitalocean/artifact.go b/builder/digitalocean/artifact.go index 3b6a05e53..6abb561c9 100644 --- a/builder/digitalocean/artifact.go +++ b/builder/digitalocean/artifact.go @@ -32,7 +32,7 @@ func (*Artifact) Files() []string { } func (a *Artifact) Id() string { - return strconv.FormatUint(uint64(a.snapshotId), 10) + return fmt.Sprintf("%s:%s", a.regionName, strconv.FormatUint(uint64(a.snapshotId), 10)) } func (a *Artifact) String() string { diff --git a/builder/digitalocean/artifact_test.go b/builder/digitalocean/artifact_test.go index 4492c7bf3..7ea586111 100644 --- a/builder/digitalocean/artifact_test.go +++ b/builder/digitalocean/artifact_test.go @@ -16,7 +16,7 @@ func TestArtifact_Impl(t *testing.T) { func TestArtifactId(t *testing.T) { a := &Artifact{"packer-foobar", 42, "San Francisco", nil} - expected := "42" + expected := "San Francisco:42" if a.Id() != expected { t.Fatalf("artifact ID should match: %v", expected) From 424edc825362ff3dea0e0dd0e3d2dab98ca745cc Mon Sep 17 00:00:00 2001 From: Greg Baker Date: Tue, 10 Nov 2015 15:46:15 -0600 Subject: [PATCH 915/956] Added alternative floppy boot command syntax The vmware-iso example configuration to load a Kickstart configuration file off of a mounted floppy image is not valid for recent versions of Linux including RHEL7. The version of Anaconda used no longer supports ks=floppy. This commit adds an alternative syntax that users can use with more recent version of Linux to source a Kickstart file from a mounted floppy image. --- .../docs/builders/vmware-iso.html.markdown | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 8e851dfb9..3b8a8ba0d 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -421,3 +421,19 @@ file by attaching a floppy disk. An example below, based on RHEL: ] } ``` + +It's also worth noting that `ks=floppy` has been deprecated in the latest versions of Anaconda. Later versions of Linux may require a different syntax to source a kickstart file from a mounted floppy image. + +``` {.javascript} +{ + "builders": [ + { + "type":"vmware-iso", + "floppy_files": [ + "folder/ks.cfg" + ], + "boot_command": " inst.text inst.ks=hd:fd0:/ks.cfg " + } + ] +} +``` From 6490e4c608adf6a3c284402469525e70da279bc6 Mon Sep 17 00:00:00 2001 From: Greg Baker Date: Wed, 11 Nov 2015 10:01:12 -0600 Subject: [PATCH 916/956] Updated the language of floppy documentation to increase clarity Changed the language of the vmware-iso floppy documentation to make is clearer that the deprecated ks=floppy syntax is a result of changes to Anaconda. --- website/source/docs/builders/vmware-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/vmware-iso.html.markdown b/website/source/docs/builders/vmware-iso.html.markdown index 3b8a8ba0d..97bb36aa1 100644 --- a/website/source/docs/builders/vmware-iso.html.markdown +++ b/website/source/docs/builders/vmware-iso.html.markdown @@ -422,7 +422,7 @@ file by attaching a floppy disk. An example below, based on RHEL: } ``` -It's also worth noting that `ks=floppy` has been deprecated in the latest versions of Anaconda. 
Later versions of Linux may require a different syntax to source a kickstart file from a mounted floppy image. +It's also worth noting that `ks=floppy` has been deprecated. Later versions of the Anaconda installer (used in RHEL/CentOS 7 and Fedora) may require a different syntax to source a kickstart file from a mounted floppy image. ``` {.javascript} {
From ed8627771458b7d35d88ba168fb3f90605086d5e Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Thu, 12 Nov 2015 18:06:10 -0500
Subject: [PATCH 917/956] Update Post-Processor Docs

Update Post-Processor docs to clarify sequences in Packer templates.
--- .../templates/post-processors.html.markdown | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+)
diff --git a/website/source/docs/templates/post-processors.html.markdown b/website/source/docs/templates/post-processors.html.markdown index 2c71e6664..7fd7fa248 100644 --- a/website/source/docs/templates/post-processors.html.markdown +++ b/website/source/docs/templates/post-processors.html.markdown @@ -82,6 +82,8 @@ sequence definition. Sequence definitions are used to chain together multiple post-processors. An example is shown below, where the artifact of a build is compressed then uploaded, but the compressed result is not kept. +It is very important that any post-processors that need to run in order are sequenced! + ``` {.javascript} { "post-processors": [ @@ -96,6 +98,27 @@ compressed then uploaded, but the compressed result is not kept. As you may be able to imagine, the **simple** and **detailed** definitions are simply shortcuts for a **sequence** definition of only one element.
+## Creating Vagrant Boxes in Atlas
+
+It is important to sequence post-processors when creating and uploading Vagrant boxes to Atlas via Packer. Using a sequence ensures that the post-processors run in order and that the Vagrant box is created before it is uploaded to Atlas.
+
+``` {.javascript}
+{
+ "post-processors": [
+ [
+ {
+ "type": "vagrant",
+ "keep_input_artifact": false
+ },
+ {
+ "type": "atlas",
+ ...
+ }
+ ]
+ ]
+}
+```
+ ## Input Artifacts When using post-processors, the input artifact (coming from a builder or another
From 912111ca428b398f5f4b970ef56602a6fe5b279b Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Thu, 12 Nov 2015 18:32:31 -0500
Subject: [PATCH 918/956] Add full example and link to Atlas Post Processor

--- .../docs/templates/post-processors.html.markdown | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/templates/post-processors.html.markdown b/website/source/docs/templates/post-processors.html.markdown index 7fd7fa248..c6926b5f1 100644 --- a/website/source/docs/templates/post-processors.html.markdown +++ b/website/source/docs/templates/post-processors.html.markdown @@ -112,13 +112,21 @@ It is important to sequence post-processors when creating and uploading Vagrant }, { "type": "atlas", - ...
+ "only": ["virtualbox-iso"], + "artifact": "dundlermifflin/dwight-schrute", + "artifact_type": "vagrant.box", + "metadata": { + "provider": "virtualbox", + "version": "0.0.1" + } } ] ] } ``` +More documentation on the Atlas post-processor can be found [here](/docs/post-processors/atlas.html) + ## Input Artifacts When using post-processors, the input artifact (coming from a builder or another From 18e875f3aa3ed090dbd2fcf930fbd6b019d34065 Mon Sep 17 00:00:00 2001 From: captainill Date: Mon, 9 Nov 2015 23:52:21 -0800 Subject: [PATCH 919/956] basic redesign structure while still being fluid --- .../source/assets/stylesheets/_columns.scss | 100 +++--- .../assets/stylesheets/_components.scss | 2 +- .../source/assets/stylesheets/_helpers.scss | 7 - website/source/assets/stylesheets/_nav.scss | 115 +++--- .../source/assets/stylesheets/_sidebar.scss | 2 +- .../assets/stylesheets/application.scss | 9 +- .../hashicorp-shared/_hashicorp-header.scss | 333 ++++++++++++++++++ .../hashicorp-shared/_hashicorp-sidebar.scss | 293 +++++++++++++++ .../hashicorp-shared/_hashicorp-utility.scss | 87 +++++ .../hashicorp-shared/_project-utility.scss | 71 ++++ website/source/layouts/inner.erb | 8 +- website/source/layouts/layout.erb | 48 ++- .../source/layouts/svg/_svg-by-hashicorp.erb | 18 + website/source/layouts/svg/_svg-download.erb | 4 + website/source/layouts/svg/_svg-github.erb | 9 + .../layouts/svg/_svg-hashicorp-logo.erb | 7 + 16 files changed, 983 insertions(+), 130 deletions(-) create mode 100755 website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss create mode 100644 website/source/assets/stylesheets/hashicorp-shared/_hashicorp-sidebar.scss create mode 100755 website/source/assets/stylesheets/hashicorp-shared/_hashicorp-utility.scss create mode 100755 website/source/assets/stylesheets/hashicorp-shared/_project-utility.scss create mode 100644 website/source/layouts/svg/_svg-by-hashicorp.erb create mode 100644 website/source/layouts/svg/_svg-download.erb create mode 100644 website/source/layouts/svg/_svg-github.erb create mode 100644 website/source/layouts/svg/_svg-hashicorp-logo.erb diff --git a/website/source/assets/stylesheets/_columns.scss b/website/source/assets/stylesheets/_columns.scss index 71f1a099e..17b9ce69e 100644 --- a/website/source/assets/stylesheets/_columns.scss +++ b/website/source/assets/stylesheets/_columns.scss @@ -1,50 +1,50 @@ -.container-xs-height { - display:table; - padding-left:0px; - padding-right:0px; -} - - -.col-xs-height { - display:table-cell; - float:none; -} - -@media (min-width: 768px) { - .container-sm-height { - display:table; - padding-left:0px; - padding-right:0px; - } - - .col-sm-height { - display:table-cell; - float:none; - } -} - -@media (min-width: 992px) { - .container-md-height { - display:table; - padding-left:0px; - padding-right:0px; - } - - .col-md-height { - display:table-cell; - float:none; - } -} - -@media (min-width: 1200px) { - .container-lg-height { - display:table; - padding-left:0px; - padding-right:0px; - } - - .col-lg-height { - display:table-cell; - float:none; - } -} +// .container-xs-height { +// display:table; +// padding-left:0px; +// padding-right:0px; +// } +// +// +// .col-xs-height { +// display:table-cell; +// float:none; +// } +// +// @media (min-width: 768px) { +// .container-sm-height { +// display:table; +// padding-left:0px; +// padding-right:0px; +// } +// +// .col-sm-height { +// display:table-cell; +// float:none; +// } +// } +// +// @media (min-width: 992px) { +// .container-md-height { +// display:table; 
+// padding-left:0px; +// padding-right:0px; +// } +// +// .col-md-height { +// display:table-cell; +// float:none; +// } +// } +// +// @media (min-width: 1200px) { +// .container-lg-height { +// display:table; +// padding-left:0px; +// padding-right:0px; +// } +// +// .col-lg-height { +// display:table-cell; +// float:none; +// } +// } diff --git a/website/source/assets/stylesheets/_components.scss b/website/source/assets/stylesheets/_components.scss index 72af67f90..f7cf9945d 100644 --- a/website/source/assets/stylesheets/_components.scss +++ b/website/source/assets/stylesheets/_components.scss @@ -167,11 +167,11 @@ header .header { .docs-wrapper { .docs-body { + max-width: 960px; @extend .white-background; .docs-content { padding: $docs-top-margin 80px; - max-width: 960px; display: block; code { diff --git a/website/source/assets/stylesheets/_helpers.scss b/website/source/assets/stylesheets/_helpers.scss index 8c20db3fc..9b9818a37 100644 --- a/website/source/assets/stylesheets/_helpers.scss +++ b/website/source/assets/stylesheets/_helpers.scss @@ -174,13 +174,6 @@ $break-lg: 980px; transform: scale($value); } -@mixin transition($type, $speed, $easing) { - -webkit-transition: $type $speed $easing; - -moz-transition: $type $speed $easing; - -o-transition: $type $speed $easing; - transition: $type $speed $easing; -} - @mixin rounded($radius) { -webkit-border-radius: $radius; -moz-border-radius: $radius; diff --git a/website/source/assets/stylesheets/_nav.scss b/website/source/assets/stylesheets/_nav.scss index 13acc3ff2..733a6c1ce 100644 --- a/website/source/assets/stylesheets/_nav.scss +++ b/website/source/assets/stylesheets/_nav.scss @@ -1,70 +1,87 @@ -nav { - height: $nav-height; - height: $nav-height; - padding: 0; - margin: 0; - min-width: 940px; - text-transform: uppercase; - color: $white; - font-family: $sans; - font-size: 16px; - border-bottom: 1px solid $border-dark; +// +// Header +// - Project Specific +// - edits should be made here +// -------------------------------------------------- - ul { - margin-top: ($baseline * 2); - margin-left: $nav-height; +#header { + background-color: $black; + .navbar-brand { + .logo{ + font-size: 20px; + text-transform: uppercase; + background: image-url('../images/logo-header.png') 0 0 no-repeat; + @include img-retina("../images/logo-header.png", "../images/logo-header@2x.png", $project-logo-width, $project-logo-height); + background-position: 0 45%; - li { - display: inline-block; - margin-right: 50px; + &:hover{ + opacity: .6; + } } - } - .packer-logo { - background: image-url('logo_nav.png') no-repeat center top; - height: 80px; - width: 80px; - background-size: 34px 50px; - text-indent: -999999px; - display: inline-block; - margin-top: 25px; - } - - @media (min-width: $screen-md-min) { - ul { - li { - &.featured { - margin: -20px 0 0 10px; - float: right; - - a { - @include button; - font-weight: bold; + .by-hashicorp{ + &:hover{ + svg{ + line{ + opacity: .4; } } } } } - @media (max-width: $screen-sm-max) { - height: auto; - padding: 0; - min-width: auto; + .buttons{ + margin-top: 2px; //baseline everything - ul { - margin: 1em 0 1em 80px; + .navigation-links{ + float: right; + } + ul.navbar-nav{ li { - display: block; - margin: 0; - padding: 1em; + svg path{ + fill: $white; + } } } } - @media (max-width: $screen-xs-max) { - button { + .main-links, + .external-links { + li > a { + @include project-a-style(); + } + } +} + +@media (max-width: 768px) { + #header { + .navbar-brand { } } } + +@media (max-width: 414px) { + #header { + 
.navbar-brand { + .logo{ + padding-left: 37px; + font-size: 18px; + @include img-retina("../images/logo-header.png", "../images/logo-header@2x.png", $project-logo-width * .75, $project-logo-height * .75); + //background-position: 0 45%; + } + } + } +} + + +@media (max-width: 320px) { + #header { + .navbar-brand { + .logo{ + font-size: 0 !important; //hide terraform text + } + } + } +} diff --git a/website/source/assets/stylesheets/_sidebar.scss b/website/source/assets/stylesheets/_sidebar.scss index 0814efdcf..95af1c7f1 100644 --- a/website/source/assets/stylesheets/_sidebar.scss +++ b/website/source/assets/stylesheets/_sidebar.scss @@ -1,4 +1,4 @@ -.sidebar { +#sidebar { $border: 1px solid $gray-dark; font-size: 16px; font-family: $mono; diff --git a/website/source/assets/stylesheets/application.scss b/website/source/assets/stylesheets/application.scss index 581c8c217..7b6e99ff3 100644 --- a/website/source/assets/stylesheets/application.scss +++ b/website/source/assets/stylesheets/application.scss @@ -1,7 +1,7 @@ @import "bootstrap-sprockets"; @import "bootstrap"; -@import url("//fonts.googleapis.com/css?family=Inconsolata"); +@import url("//fonts.googleapis.com/css?family=Inconsolata|Open+Sans:300,400,600"); @import "_helpers"; @import "_reset"; @@ -9,6 +9,13 @@ @import "_columns"; @import "_buttons"; @import "_styles"; + +// Hashicorp Shared Project Styles +@import 'hashicorp-shared/_project-utility'; +@import 'hashicorp-shared/_hashicorp-utility'; +@import 'hashicorp-shared/_hashicorp-header'; +@import 'hashicorp-shared/_hashicorp-sidebar'; + @import "_nav"; @import "_footer"; @import "_components"; diff --git a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss new file mode 100755 index 000000000..b88467e8e --- /dev/null +++ b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss @@ -0,0 +1,333 @@ +// +// Hashicorp header +// - Shared throughout projects +// - Edits should not be made here +// -------------------------------------------------- + +#header{ + position: relative; + margin-bottom: 0; +} + +.navigation { + color: black; + text-rendering: optimizeLegibility; + transition: all 1s ease; + + &.white{ + .navbar-brand { + .logo { + color: white; + } + } + + .main-links, + .external-links { + li > a { + &:hover{ + opacity: 1; + } + } + } + } + + &.black{ + .navbar-brand { + .logo { + color: black; + } + } + + .main-links, + .external-links { + li > a { + color: black; + } + } + } + + .navbar-toggle{ + height: $header-height; + margin: 0; + border-radius: 0; + .icon-bar{ + border: 1px solid $black; + border-radius: 0; + } + } + + .external-links { + &.white{ + svg path{ + fill: $white; + } + } + + li { + position: relative; + + svg path{ + @include transition( all 300ms ease-in ); + } + + &:hover{ + svg path{ + @include transition( all 300ms ease-in ); + } + } + + @include project-svg-external-links-style(); + + &.download{ + margin-right: 10px; + } + + > a { + padding-left: 12px !important; + svg{ + position: absolute; + left: -12px; + top: 50%; + margin-top: -7px; + width: 14px; + height: 14px; + } + } + } + } + + .main-links{ + margin-right: $nav-margin-right * 2; + } + + .main-links, + .external-links { + &.white{ + li > a { + color: white; + } + } + li > a { + @include hashi-a-style(); + margin: 0 10px; + padding-top: 1px; + line-height: $header-height; + @include project-a-style(); + } + } + + .nav > li > a:hover, .nav > li > a:focus { + background-color: 
transparent; + @include transition( all 300ms ease-in ); + } +} + +.navbar-brand { + display: block; + height: $header-height; + padding: 0; + margin: 0 10px 0 0; + + .logo{ + display: inline-block; + height: $header-height; + vertical-align:top; + padding: 0; + line-height: $header-height; + padding-left: $project-logo-width + $project-logo-pad-left; + background-position: 0 center; + @include transition(all 300ms ease-in); + + &:hover{ + @include transition(all 300ms ease-in); + text-decoration: none; + } + } +} + +.navbar-toggle{ + &.white{ + .icon-bar{ + border: 1px solid white; + } + } +} + +.by-hashicorp{ + display: inline-block; + vertical-align:top; + height: $header-height; + margin-left: 3px; + padding-top: 2px; + color: black; + line-height: $header-height; + font-family: $header-font-family; + font-weight: 600; + font-size: 0; + text-decoration: none; + + &.white{ + color: white; + font-weight: 300; + svg{ + path, + polygon{ + fill: white; + } + line{ + stroke: white; + } + } + + &:focus, + &:hover{ + text-decoration: none; + color: white; + } + } + + &:focus, + &:hover{ + text-decoration: none; + } + + .svg-wrap{ + font-size: 13px; + } + + svg{ + &.svg-by{ + width: $by-hashicorp-width; + height: $by-hashicorp-height; + margin-bottom: -4px; + margin-left: 4px; + } + + &.svg-logo{ + width: 16px; + height: 16px; + margin-bottom: -3px; + margin-left: 4px; + } + + path, + polygon{ + fill: black; + @include transition(all 300ms ease-in); + + &:hover{ + @include transition(all 300ms ease-in); + } + } + line{ + stroke: black; + @include transition(all 300ms ease-in); + + &:hover{ + @include transition(all 300ms ease-in); + } + } + } +} + +.hashicorp-project{ + display: inline-block; + height: 30px; + line-height: 30px; + text-decoration: none; + font-size: 14px; + color: $black; + font-weight: 600; + + &.white{ + color: white; + svg{ + path, + polygon{ + fill: white; + } + line{ + stroke: white; + } + } + } + + &:focus, + &:hover{ + text-decoration: none; + } + + span{ + margin-right: 4px; + font-family: $header-font-family; + font-weight: 500; + } + + span, + svg{ + display: inline-block; + } + + svg{ + &.svg-by{ + width: $by-hashicorp-width; + height: $by-hashicorp-height; + margin-bottom: -4px; + margin-left: -3px; + } + + &.svg-logo{ + width: 30px; + height: 30px; + margin-bottom: -10px; + margin-left: -1px; + } + + path, + line{ + fill: $black; + @include transition(all 300ms ease-in); + + &:hover{ + @include transition(all 300ms ease-in); + } + } + } +} + +@media (max-width: 480px) { + .navigation { + .main-links{ + margin-right: 0; + } + } +} + +@media (max-width: 414px) { + #header { + .navbar-toggle{ + padding-top: 10px; + height: $header-mobile-height; + } + + .navbar-brand { + height: $header-mobile-height; + + .logo{ + height: $header-mobile-height; + line-height: $header-mobile-height; + } + .by-hashicorp{ + height: $header-mobile-height; + line-height: $header-mobile-height; + padding-top: 0; + } + } + .main-links, + .external-links { + li > a { + line-height: $header-mobile-height; + } + } + } +} diff --git a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-sidebar.scss b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-sidebar.scss new file mode 100644 index 000000000..99f77f6c5 --- /dev/null +++ b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-sidebar.scss @@ -0,0 +1,293 @@ +// +// Hashicorp Sidebar +// - Shared throughout projects +// - Edits should not be made here +// -------------------------------------------------- + +// 
Base variables +// -------------------------------------------------- +$screen-tablet: 768px; + +$gray-darker: #212121; // #212121 - text +$gray-secondary: #757575; // #757575 - secondary text, icons +$gray: #bdbdbd; // #bdbdbd - hint text +$gray-light: #e0e0e0; // #e0e0e0 - divider +$gray-lighter: #f5f5f5; // #f5f5f5 - background +$link-color: $gray-darker; +$link-bg: transparent; +$link-hover-color: $gray-lighter; +$link-hover-bg: $gray-lighter; +$link-active-color: $gray-darker; +$link-active-bg: $gray-light; +$link-disabled-color: $gray-light; +$link-disabled-bg: transparent; + +/* -- Sidebar style ------------------------------- */ + +// Sidebar variables +// -------------------------------------------------- +$zindex-sidebar-fixed: 1035; + +$sidebar-desktop-width: 280px; +$sidebar-width: 240px; + +$sidebar-padding: 16px; +$sidebar-divider: $sidebar-padding/2; + +$sidebar-icon-width: 40px; +$sidebar-icon-height: 20px; + +@mixin sidebar-nav-base { + text-align: center; + + &:last-child{ + border-bottom: none; + } + + li > a { + background-color: $link-bg; + } + li:hover > a { + background-color: $link-hover-bg; + } + li:focus > a, li > a:focus { + background-color: $link-bg; + } + + > .open > a { + &, + &:hover, + &:focus { + background-color: $link-hover-bg; + } + } + + > .active > a { + &, + &:hover, + &:focus { + background-color: $link-active-bg; + } + } + > .disabled > a { + &, + &:hover, + &:focus { + background-color: $link-disabled-bg; + } + } + + // Dropdown menu items + > .dropdown { + // Remove background color from open dropdown + > .dropdown-menu { + background-color: $link-hover-bg; + + > li > a { + &:focus { + background-color: $link-hover-bg; + } + &:hover { + background-color: $link-hover-bg; + } + } + + > .active > a { + &, + &:hover, + &:focus { + color: $link-active-color; + background-color: $link-active-bg; + } + } + } + } +} + +// +// Sidebar +// -------------------------------------------------- + +// Sidebar Elements +// +// Basic style of sidebar elements +.sidebar { + position: relative; + display: block; + min-height: 100%; + overflow-y: auto; + overflow-x: hidden; + border: none; + @include transition(all 0.5s cubic-bezier(0.55, 0, 0.1, 1)); + @include clearfix(); + background-color: $white; + + ul{ + padding-left: 0; + list-style-type: none; + } + + .sidebar-divider, .divider { + width: 80%; + height: 1px; + margin: 8px auto; + background-color: lighten($gray, 20%); + } + + // Sidebar heading + //---------------- + .sidebar-header { + position: relative; + margin-bottom: $sidebar-padding; + @include transition(all .2s ease-in-out); + } + + .sidebar-image { + padding-top: 24px; + img { + display: block; + margin: 0 auto; + } + } + + + // Sidebar icons + //---------------- + .sidebar-icon { + display: inline-block; + height: $sidebar-icon-height; + margin-right: $sidebar-divider; + text-align: left; + font-size: $sidebar-icon-height; + vertical-align: middle; + + &:before, &:after { + vertical-align: middle; + } + } + + .sidebar-nav { + margin: 0; + padding: 0; + + @include sidebar-nav-base(); + + // Links + //---------------- + li { + position: relative; + list-style-type: none; + text-align: center; + + a { + position: relative; + cursor: pointer; + user-select: none; + @include hashi-a-style-core(); + + svg{ + top: 2px; + width: 14px; + height: 14px; + margin-bottom: -2px; + margin-right: 4px; + } + } + } + } +} + +// Sidebar toggling +// +// Hide sidebar +.sidebar { + width: 0; + @include translate3d(-$sidebar-desktop-width, 0, 0); + + &.open { + 
min-width: $sidebar-desktop-width; + width: $sidebar-desktop-width; + @include translate3d(0, 0, 0); + } +} + +// Sidebar positions: fix the left/right sidebars +.sidebar-fixed-left, +.sidebar-fixed-right, +.sidebar-stacked { + position: fixed; + top: 0; + bottom: 0; + z-index: $zindex-sidebar-fixed; +} +.sidebar-stacked { + left: 0; +} +.sidebar-fixed-left { + left: 0; + box-shadow: 2px 0px 25px rgba(0,0,0,0.15); + -webkit-box-shadow: 2px 0px 25px rgba(0,0,0,0.15); +} +.sidebar-fixed-right { + right: 0; + box-shadow: 0px 2px 25px rgba(0,0,0,0.15); + -webkit-box-shadow: 0px 2px 25px rgba(0,0,0,0.15); + + @include translate3d($sidebar-desktop-width, 0, 0); + &.open { + @include translate3d(0, 0, 0); + } + .icon-material-sidebar-arrow:before { + content: "\e614"; // icon-material-arrow-forward + } +} + +// Sidebar size +// +// Change size of sidebar and sidebar elements on small screens +@media (max-width: $screen-tablet) { + .sidebar.open { + min-width: $sidebar-width; + width: $sidebar-width; + } + + .sidebar .sidebar-header { + //height: $sidebar-width * 9/16; // 16:9 header dimension + } + + .sidebar .sidebar-image { + /* img { + width: $sidebar-width/4 - $sidebar-padding; + height: $sidebar-width/4 - $sidebar-padding; + } */ + } +} + +.sidebar-overlay { + visibility: hidden; + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + opacity: 0; + background: $white; + z-index: $zindex-sidebar-fixed - 1; + + -webkit-transition: visibility 0 linear .4s,opacity .4s cubic-bezier(.4,0,.2,1); + -moz-transition: visibility 0 linear .4s,opacity .4s cubic-bezier(.4,0,.2,1); + transition: visibility 0 linear .4s,opacity .4s cubic-bezier(.4,0,.2,1); + -webkit-transform: translateZ(0); + -moz-transform: translateZ(0); + -ms-transform: translateZ(0); + -o-transform: translateZ(0); + transform: translateZ(0); +} + +.sidebar-overlay.active { + opacity: 0.3; + visibility: visible; + -webkit-transition-delay: 0; + -moz-transition-delay: 0; + transition-delay: 0; +} diff --git a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-utility.scss b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-utility.scss new file mode 100755 index 000000000..de17e9815 --- /dev/null +++ b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-utility.scss @@ -0,0 +1,87 @@ +// +// Hashicorp Nav (header/footer) Utiliy Vars and Mixins +// +// Notes: +// - Include this in Application.scss before header and feature-footer +// - Open Sans Google (Semibold - 600) font needs to be included if not already +// -------------------------------------------------- + +// Variables +$font-family-open-sans: 'Open Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif; +$header-font-family: $font-family-open-sans; +$header-font-weight: 600; // semi-bold + +$header-height: 74px; +$header-mobile-height: 60px; +$by-hashicorp-width: 74px; +$by-hashicorp-height: 16px; +$nav-margin-right: 12px; + +// Mixins +@mixin hashi-a-style-core{ + font-family: $header-font-family; + font-weight: $header-font-weight; + font-size: 14px; + //letter-spacing: 0.0625em; +} + +@mixin hashi-a-style{ + margin: 0 15px; + padding: 0; + line-height: 22px; + @include hashi-a-style-core(); + @include transition( all 300ms ease-in ); + + &:hover{ + @include transition( all 300ms ease-in ); + background-color: transparent; + } +} + +//general shared project mixins +@mixin img-retina($image1x, $image, $width, $height) { + background-image: url($image1x); + background-size: $width $height; + background-repeat: no-repeat; + + @media 
(min--moz-device-pixel-ratio: 1.3), + (-o-min-device-pixel-ratio: 2.6/2), + (-webkit-min-device-pixel-ratio: 1.3), + (min-device-pixel-ratio: 1.3), + (min-resolution: 1.3dppx) { + /* on retina, use image that's scaled by 2 */ + background-image: url($image); + background-size: $width $height; + } +} + +// +// ------------------------- +@mixin anti-alias() { + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; +} + +@mixin open-light() { + font-family: $font-family-open-sans; + font-weight: 300; +} + +@mixin open() { + font-family: $font-family-open-sans; + font-weight: 400; +} + +@mixin open-sb() { + font-family: $font-family-open-sans; + font-weight: 600; +} + +@mixin open-bold() { + font-family: $font-family-open-sans; + font-weight: 700; +} + +@mixin bez-1-transition{ + @include transition( all 300ms ease-in-out ); +} diff --git a/website/source/assets/stylesheets/hashicorp-shared/_project-utility.scss b/website/source/assets/stylesheets/hashicorp-shared/_project-utility.scss new file mode 100755 index 000000000..114a3da24 --- /dev/null +++ b/website/source/assets/stylesheets/hashicorp-shared/_project-utility.scss @@ -0,0 +1,71 @@ +// +// Mixins Specific to project +// - make edits to mixins here +// -------------------------------------------------- + +// Variables +$project-logo-width: 38px; +$project-logo-height: 40px; +$project-logo-pad-left: 8px; + +// Mixins +@mixin project-a-style{ + color: $white; + font-weight: 300; + opacity: .75; + + &:hover{ + color: $white; + opacity: 1; + } +} + +@mixin project-footer-a-style{ + color: $black; + font-weight: 400; + + &:hover{ + color: $green; + + svg path{ + fill: $green; + } + } +} + +@mixin project-footer-a-subpage-style{ + color: $white; + font-weight: 300; + + svg path{ + fill: $white; + } + + &:hover{ + color: $green; + + svg path{ + fill: $green; + } + } +} + +@mixin project-svg-external-links-style{ + svg path{ + fill: $black; + } + + &:hover{ + svg path{ + fill: $green; + } + } +} + +@mixin project-by-hashicorp-style{ + &:hover{ + line{ + stroke: $green; + } + } +} diff --git a/website/source/layouts/inner.erb b/website/source/layouts/inner.erb index 0706d1f9d..e45fe3cea 100644 --- a/website/source/layouts/inner.erb +++ b/website/source/layouts/inner.erb @@ -1,10 +1,10 @@ <% wrap_layout :layout do %> -