From a100e9393b9ae963f502c7dcab578cc0dabe4652 Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sun, 11 Jan 2015 10:25:48 +1100 Subject: [PATCH 01/50] Add support for custom working directory for puppet --- provisioner/puppet-masterless/provisioner.go | 14 +++++++++++++- .../provisioners/puppet-masterless.html.markdown | 7 +++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 307ecce38..d96397e41 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -41,6 +41,10 @@ type Config struct { // The directory where files will be uploaded. Packer requires write // permissions in this directory. StagingDir string `mapstructure:"staging_directory"` + + // The directory from which the command will be executed. + // Packer requires the directory to exist when running puppet. + WorkingDir string `mapstructure:"working_directory"` } type Provisioner struct { @@ -48,6 +52,7 @@ type Provisioner struct { } type ExecuteTemplate struct { + WorkingDir string FacterVars string HieraConfigPath string ModulePath string @@ -73,7 +78,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { // Set some defaults if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + + p.config.ExecuteCommand = "cd {{.WorkingDir}} && " + + "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + "puppet apply --verbose --modulepath='{{.ModulePath}}' " + "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + @@ -85,12 +91,17 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-puppet-masterless" } + if p.config.WorkingDir == "" { + p.config.StagingDir = p.config.StagingDir + } + // Templates templates := map[string]*string{ "hiera_config_path": 
&p.config.HieraConfigPath, "manifest_file": &p.config.ManifestFile, "manifest_dir": &p.config.ManifestDir, "staging_dir": &p.config.StagingDir, + "working_dir": &p.config.WorkingDir, } for n, ptr := range templates { @@ -256,6 +267,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { // Execute Puppet command, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteTemplate{ + WorkingDir: p.config.WorkingDir, FacterVars: strings.Join(facterVars, " "), HieraConfigPath: remoteHieraConfigPath, ManifestDir: remoteManifestDir, diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 953dca6ea..4ed566bc6 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -79,12 +79,18 @@ Optional parameters: this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `working_directory` (string) - This is the directory from which the puppet command + will be run. When using hiera with a relative path, this option allows to ensure + that he paths are working properly. If not specified, defaults to the value of + specified `staging_directory` (or its default value if not specified either). + ## Execute Command By default, Packer uses the following command (broken across multiple lines for readability) to execute Puppet: ```liquid +cd {{.WorkingDir}} && \ {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \ --verbose \ --modulepath='{{.ModulePath}}' \ @@ -98,6 +104,7 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: +* `WorkingDir` - The path from which Puppet will be executed. 
* `FacterVars` - Shell-friendly string of environmental variables used to set custom facts configured for this provisioner. * `HieraConfigPath` - The path to a hiera configuration file. From b7fccec91c930d67b11b18d5cf6b3afece228f3a Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sun, 11 Jan 2015 10:29:01 +1100 Subject: [PATCH 02/50] Set the working dir to staging dir --- provisioner/puppet-masterless/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index d96397e41..6bbfc5c1f 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -92,7 +92,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } if p.config.WorkingDir == "" { - p.config.StagingDir = p.config.StagingDir + p.config.WorkingDir = p.config.StagingDir } // Templates From 8c87b1cc00618632ef1a80e4d349e2918bf92c8b Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 12:20:31 -0500 Subject: [PATCH 03/50] First attempt for re-using a named EC2 keypair Adds a 'ssh_keypair_name' option to the configuration for AWS, along with some munging to create the temporarily keypair if one isn't specific. NOT YET WORKING. 
From a 'make' I get the following errors: builder/amazon/ebs/builder.go:94: b.config.SSHKeyPairName undefined (type config has no field or method SSHKeyPairName) builder/amazon/instance/builder.go:199: b.config.SSHKeyPairName undefined (type Config has no field or method SSHKeyPairName) --- builder/amazon/common/run_config.go | 7 +++++-- builder/amazon/common/run_config_test.go | 4 ++-- builder/amazon/common/step_key_pair.go | 22 ++++++++++++---------- builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index a71387623..f6e859c03 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -24,6 +24,7 @@ type RunConfig struct { RawSSHTimeout string `mapstructure:"ssh_timeout"` SSHUsername string `mapstructure:"ssh_username"` SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"` + SSHKeyPairName string `mapstructure:"ssh_keypair_name"` SSHPrivateIp bool `mapstructure:"ssh_private_ip"` SSHPort int `mapstructure:"ssh_port"` SecurityGroupId string `mapstructure:"security_group_id"` @@ -55,6 +56,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { "ssh_timeout": &c.RawSSHTimeout, "ssh_username": &c.SSHUsername, "ssh_private_key_file": &c.SSHPrivateKeyFile, + "ssh_keypair_name": &c.SSHKeyPairName, "source_ami": &c.SourceAmi, "subnet_id": &c.SubnetId, "temporary_key_pair_name": &c.TemporaryKeyPairName, @@ -84,8 +86,9 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { c.RawSSHTimeout = "5m" } - if c.TemporaryKeyPairName == "" { - c.TemporaryKeyPairName = fmt.Sprintf( + // if we are not given an explicit keypairname, create a temporary one + if c.SSHKeyPairName == "" { + c.SSHKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go 
index 8e9c4b6b9..c4e1fa110 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -142,12 +142,12 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) { func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) { c := testConfig() - c.TemporaryKeyPairName = "" + c.SSHKeyPairName = "" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.TemporaryKeyPairName == "" { + if c.SSHKeyPairName == "" { t.Fatal("keypair empty") } } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index 3a7eb9f35..db60e1e40 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -21,7 +21,7 @@ type StepKeyPair struct { func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - s.keyName = "" + s.keyName = s.KeyPairName // need to get from config privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -29,7 +29,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - state.Put("keyPair", "") + state.Put("keyPair", s.keyName) state.Put("privateKey", string(privateKeyBytes)) return multistep.ActionContinue @@ -83,17 +83,19 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return - if s.keyName == "" { + // If we used an SSH private key file, do not go about deleting + // keypairs + if s.PrivateKeyFile != "" { return } - ec2conn := state.Get("ec2").(*ec2.EC2) + //ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("Deleting temporary keypair...") - _, err := ec2conn.DeleteKeyPair(s.keyName) - if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) - } + ui.Say("DANGER: Deleting temporary keypair (not really)...") + //_, err := ec2conn.DeleteKeyPair(s.keyName) + //if err != nil { + //ui.Error(fmt.Sprintf( + //"Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) + //} } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 889cc7b60..083507993 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -91,7 +91,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 1f5c1d9c8..ce582e039 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -196,7 +196,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ From 9d097f9d4ef331adac5f322d14ca185e879331a0 Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 16:27:33 -0500 Subject: [PATCH 04/50] Permit Temp keys and named SSH keypairs These changes permit the use of pre-created SSH keypairs with AWS. If so, the configuration for the builder needs to include an ssh_keypair_name option and a ssh_private_key_file. If ssh_private_key_file is *not* defined, it'll go through the rigamarole of creating a temporary keypair. 
The ssh_keypair_name option by itself won't make that change, because it doesn't make sense to specify a keypair but not tell packer where the private key is, but it does happen that you could have a private key and the public-key is "baked in", and not part of your EC2 account. --- builder/amazon/common/run_config.go | 2 +- builder/amazon/common/run_config_test.go | 4 ++-- builder/amazon/common/step_key_pair.go | 25 +++++++++++++----------- builder/amazon/ebs/builder.go | 9 +++++---- builder/amazon/instance/builder.go | 9 +++++---- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index f6e859c03..67ec74d79 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -88,7 +88,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { // if we are not given an explicit keypairname, create a temporary one if c.SSHKeyPairName == "" { - c.SSHKeyPairName = fmt.Sprintf( + c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go index c4e1fa110..8e9c4b6b9 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -142,12 +142,12 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) { func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) { c := testConfig() - c.SSHKeyPairName = "" + c.TemporaryKeyPairName = "" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } - if c.SSHKeyPairName == "" { + if c.TemporaryKeyPairName == "" { t.Fatal("keypair empty") } } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index db60e1e40..f6e6a0555 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -13,6 +13,7 @@ import ( type StepKeyPair struct { Debug bool DebugKeyPath string + 
TemporaryKeyPairName string KeyPairName string PrivateKeyFile string @@ -21,7 +22,9 @@ type StepKeyPair struct { func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - s.keyName = s.KeyPairName // need to get from config + if s.KeyPairName != "" { + s.keyName = s.KeyPairName // need to get from config + } privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -38,15 +41,15 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName)) - keyResp, err := ec2conn.CreateKeyPair(s.KeyPairName) + ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName)) + keyResp, err := ec2conn.CreateKeyPair(s.TemporaryKeyPairName) if err != nil { state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) return multistep.ActionHalt } // Set the keyname so we know to delete it later - s.keyName = s.KeyPairName + s.keyName = s.TemporaryKeyPairName // Set some state data for use in future steps state.Put("keyPair", s.keyName) @@ -89,13 +92,13 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { return } - //ec2conn := state.Get("ec2").(*ec2.EC2) + ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("DANGER: Deleting temporary keypair (not really)...") - //_, err := ec2conn.DeleteKeyPair(s.keyName) - //if err != nil { - //ui.Error(fmt.Sprintf( - //"Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) - //} + ui.Say("DANGER: Deleting temporary keypair...") + _, err := ec2conn.DeleteKeyPair(s.keyName) + if err != nil { + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) + } } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 083507993..95e7ea016 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -89,10 +89,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + TemporaryKeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index ce582e039..b677f4da9 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -194,10 +194,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.SSHKeyPairName, - PrivateKeyFile: b.config.SSHPrivateKeyFile, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + TemporaryKeyPairName: b.config.TemporaryKeyPairName, + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.SSHPrivateKeyFile, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, From 62e054c404e7e622203a5aadb9e062c77c663d87 Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 21:40:15 -0500 Subject: [PATCH 05/50] simplify output 
--- builder/amazon/common/step_key_pair.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index f6e6a0555..a4baf8e0b 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -95,7 +95,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say("DANGER: Deleting temporary keypair...") + ui.Say("Deleting temporary keypair...") _, err := ec2conn.DeleteKeyPair(s.keyName) if err != nil { ui.Error(fmt.Sprintf( From 43f08b2664d9736a71152e0bd0df397f994e6afe Mon Sep 17 00:00:00 2001 From: "Lesko, Matthew (NIH/NLM/NCBI) [C]" Date: Tue, 13 Jan 2015 22:58:41 -0500 Subject: [PATCH 06/50] go fmt all the things --- builder/amazon/common/run_config.go | 2 +- builder/amazon/common/step_key_pair.go | 24 ++++++++++++------------ builder/amazon/ebs/builder.go | 2 +- builder/amazon/instance/builder.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 67ec74d79..d4ebec0e1 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -87,7 +87,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error { } // if we are not given an explicit keypairname, create a temporary one - if c.SSHKeyPairName == "" { + if c.SSHKeyPairName == "" { c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index a4baf8e0b..3f40e3d77 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -11,20 +11,20 @@ import ( ) type StepKeyPair struct { - Debug bool - DebugKeyPath string - TemporaryKeyPairName string - KeyPairName string - PrivateKeyFile string + Debug bool + DebugKeyPath string + 
TemporaryKeyPairName string + KeyPairName string + PrivateKeyFile string keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - if s.KeyPairName != "" { - s.keyName = s.KeyPairName // need to get from config - } + if s.KeyPairName != "" { + s.keyName = s.KeyPairName // need to get from config + } privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -86,8 +86,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return - // If we used an SSH private key file, do not go about deleting - // keypairs + // If we used an SSH private key file, do not go about deleting + // keypairs if s.PrivateKeyFile != "" { return } @@ -98,7 +98,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { ui.Say("Deleting temporary keypair...") _, err := ec2conn.DeleteKeyPair(s.keyName) if err != nil { - ui.Error(fmt.Sprintf( - "Error cleaning up keypair. Please delete the key manually: %s", s.keyName)) + ui.Error(fmt.Sprintf( + "Error cleaning up keypair. 
Please delete the key manually: %s", s.keyName)) } } diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index 95e7ea016..0c2258ad6 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -91,7 +91,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - TemporaryKeyPairName: b.config.TemporaryKeyPairName, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index b677f4da9..538e9efb7 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -196,7 +196,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &awscommon.StepKeyPair{ Debug: b.config.PackerDebug, DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - TemporaryKeyPairName: b.config.TemporaryKeyPairName, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.SSHPrivateKeyFile, }, From d1445bc6fe331eb20270f771795b621ed6ea9059 Mon Sep 17 00:00:00 2001 From: David Danzilio Date: Tue, 27 Jan 2015 14:11:08 -0500 Subject: [PATCH 07/50] Make PackerBuildName and PackerBuilderType available as Facts during a masterless run similar to the way we do with the Shell provisioner. 
--- provisioner/puppet-masterless/provisioner.go | 2 ++ .../provisioners/puppet-masterless.html.markdown | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index 307ecce38..c1085e2f4 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -147,6 +147,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { newFacts[k] = v } + newFacts["packer_build_name"] = p.config.PackerBuildName + newFacts["packer_builder_type"] = p.config.PackerBuilderType p.config.Facter = newFacts // Validation diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index bc65ae812..d0d8e1c95 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -106,3 +106,17 @@ can contain various template variables, defined below: * `ModulePath` - The paths to the module directories. * `Sudo` - A boolean of whether to `sudo` the command or not, depending on the value of the `prevent_sudo` configuration. + +## Default Facts + +In addition to being able to specify custom Facter facts using the `facter` +configuration, the provisioner automatically defines certain commonly useful +facts: + +* `packer_build_name` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them in your Hiera hierarchy. + +* `packer_builder_type` is the type of the builder that was used to create the + machine that Puppet is running on. This is useful if you want to run only + certain parts of your Puppet code on systems built with certain builders. 
From 8404f6ce860c6dbba8d9889f4769b5cdfd384635 Mon Sep 17 00:00:00 2001 From: David Danzilio Date: Tue, 24 Feb 2015 22:52:09 -0500 Subject: [PATCH 08/50] Taking a stab at a test for the facter facts --- .../puppet-masterless/provisioner_test.go | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 0d5576b6b..7ed4a59cd 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -133,3 +133,47 @@ func TestProvisionerPrepare_modulePaths(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerPrepare_facterFacts(t *testing.T) { + config := testConfig() + + delete(config, "facter") + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test with malformed fact + config["facter"] = "fact=stringified" + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should be an error") + } + + // Test with a good one + td, err := ioutil.TempDir("", "packer") + if err != nil { + t.Fatalf("error: %s", err) + } + defer os.RemoveAll(td) + + facts := make(map[string]string) + facts["fact_name"] = "fact_value" + config["facter"] = facts + + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure the default facts are present + delete(config, "facter") + p = new(Provisioner) + err = p.Prepare(config) + if p.config.Facter == nil { + t.Fatalf("err: Default facts are not set in the Puppet provisioner!") + } +} From 7e3d172581aff9d5dc967019ef4901b5d5572642 Mon Sep 17 00:00:00 2001 From: David Danzilio Date: Tue, 24 Feb 2015 22:56:37 -0500 Subject: [PATCH 09/50] Fixing spacing on line 162 and 164 of provisioner/puppet-masterless/provisioner_test.go --- provisioner/puppet-masterless/provisioner_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 7ed4a59cd..42ddd9d7a 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -159,9 +159,9 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { } defer os.RemoveAll(td) - facts := make(map[string]string) + facts := make(map[string]string) facts["fact_name"] = "fact_value" - config["facter"] = facts + config["facter"] = facts p = new(Provisioner) err = p.Prepare(config) @@ -169,7 +169,7 @@ func TestProvisionerPrepare_facterFacts(t *testing.T) { t.Fatalf("err: %s", err) } - // Make sure the default facts are present + // Make sure the default facts are present delete(config, "facter") p = new(Provisioner) err = p.Prepare(config) From 34e34d1f1825c6a1657ddedf4f9304f81df0c52c Mon Sep 17 00:00:00 2001 From: Colin Hebert Date: Sat, 28 Feb 2015 23:02:49 +1100 Subject: [PATCH 10/50] Fix typo --- .../source/docs/provisioners/puppet-masterless.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index 4ed566bc6..bd239ebe0 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -81,7 +81,7 @@ Optional parameters: * `working_directory` (string) - This is the directory from which the puppet command will be run. When using hiera with a relative path, this option allows to ensure - that he paths are working properly. If not specified, defaults to the value of + that the paths are working properly. If not specified, defaults to the value of specified `staging_directory` (or its default value if not specified either). 
## Execute Command From 5c06af872dfef440926f8941328ee5e6b5ce7a0f Mon Sep 17 00:00:00 2001 From: Ameir Abdeldayem Date: Thu, 9 Apr 2015 02:15:16 -0400 Subject: [PATCH 11/50] Support chef-client 'client_key' and default to /client.pem. --- provisioner/chef-client/provisioner.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index b3d91b3e4..a4eac96fc 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -35,6 +35,7 @@ type Config struct { SkipCleanNode bool `mapstructure:"skip_clean_node"` SkipInstall bool `mapstructure:"skip_install"` StagingDir string `mapstructure:"staging_directory"` + ClientKey string `mapstructure:"client_key"` ValidationKeyPath string `mapstructure:"validation_key_path"` ValidationClientName string `mapstructure:"validation_client_name"` @@ -48,6 +49,7 @@ type Provisioner struct { type ConfigTemplate struct { NodeName string ServerUrl string + ClientKey string ValidationKeyPath string ValidationClientName string ChefEnvironment string @@ -88,6 +90,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { "chef_server_url": &p.config.ServerUrl, "execute_command": &p.config.ExecuteCommand, "install_command": &p.config.InstallCommand, + "client_key": &p.config.ClientKey, "validation_key_path": &p.config.ValidationKeyPath, "validation_client_name": &p.config.ValidationClientName, } @@ -209,6 +212,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error creating staging directory: %s", err) } + if p.config.ClientKey == "" { + p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir) + } + if p.config.ValidationKeyPath != "" { remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir) if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil { @@ -217,7 +224,7 @@ func (p 
*Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } configPath, err := p.createConfig( - ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) + ui, comm, nodeName, serverUrl, p.config.ClientKey, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) if err != nil { return fmt.Errorf("Error creating Chef config file: %s", err) } @@ -271,7 +278,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds return comm.UploadDir(dst, src, nil) } -func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { +func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { ui.Message("Creating configuration file 'client.rb'") // Read the template @@ -294,6 +301,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{ NodeName: nodeName, ServerUrl: serverUrl, + ClientKey: clientKey, ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, @@ -566,6 +574,7 @@ var DefaultConfigTemplate = ` log_level :info log_location STDOUT chef_server_url "{{.ServerUrl}}" +client_key "{{.ClientKey}}" {{if ne .ValidationClientName ""}} validation_client_name "{{.ValidationClientName}}" {{else}} From 4735ab004a32facd41ee8bff17e27ea8b2e74ac9 Mon Sep 17 00:00:00 2001 From: Ameir Abdeldayem Date: Thu, 9 Apr 2015 02:19:52 -0400 Subject: [PATCH 12/50] Add docs for `client_key` option of `chef-client` provisioner. 
--- website/source/docs/provisioners/chef-client.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index a2e2f6f5a..eaeadbf45 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -88,6 +88,9 @@ configuration is actually required. this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `client_key` (string) - Path to client key. If not set, this defaults to a file + named client.pem in `staging_directory`. + * `validation_client_name` (string) - Name of the validation client. If not set, this won't be set in the configuration and the default that Chef uses will be used. From 64fd3a3302c04868065edd5a874010a53e28f7a2 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 17:24:03 -0700 Subject: [PATCH 13/50] Added file builder as a cheap, fast way to build something with output for testing post-processors --- builder/file/artifact.go | 36 ++++++++++++++++++++++++ builder/file/artifact_test.go | 11 ++++++++ builder/file/builder.go | 53 +++++++++++++++++++++++++++++++++++ builder/file/builder_test.go | 11 ++++++++ builder/file/config.go | 48 +++++++++++++++++++++++++++++++ builder/file/config_test.go | 35 +++++++++++++++++++++++ 6 files changed, 194 insertions(+) create mode 100644 builder/file/artifact.go create mode 100644 builder/file/artifact_test.go create mode 100644 builder/file/builder.go create mode 100644 builder/file/builder_test.go create mode 100644 builder/file/config.go create mode 100644 builder/file/config_test.go diff --git a/builder/file/artifact.go b/builder/file/artifact.go new file mode 100644 index 000000000..35bf06e6c --- /dev/null +++ b/builder/file/artifact.go @@ -0,0 +1,36 @@ +package file + +import ( + "fmt" + "log" + "os" +) + +type FileArtifact 
struct {
+	filename string
+}
+
+func (*FileArtifact) BuilderId() string {
+	return BuilderId
+}
+
+func (a *FileArtifact) Files() []string {
+	return []string{a.filename}
+}
+
+func (a *FileArtifact) Id() string {
+	return "File"
+}
+
+func (a *FileArtifact) String() string {
+	return fmt.Sprintf("Stored file: %s", a.filename)
+}
+
+func (a *FileArtifact) State(name string) interface{} {
+	return nil
+}
+
+func (a *FileArtifact) Destroy() error {
+	log.Printf("Deleting %s", a.filename)
+	return os.Remove(a.filename)
+}
diff --git a/builder/file/artifact_test.go b/builder/file/artifact_test.go
new file mode 100644
index 000000000..0aa77894b
--- /dev/null
+++ b/builder/file/artifact_test.go
@@ -0,0 +1,11 @@
+package file
+
+import (
+	"testing"
+
+	"github.com/mitchellh/packer/packer"
+)
+
+func TestNullArtifact(t *testing.T) {
+	var _ packer.Artifact = new(FileArtifact)
+}
diff --git a/builder/file/builder.go b/builder/file/builder.go
new file mode 100644
index 000000000..89047ab75
--- /dev/null
+++ b/builder/file/builder.go
@@ -0,0 +1,53 @@
+package file
+
+/*
+The File builder creates an artifact from a file. Because it does not require
+any virtualization or network resources, it's very fast and useful for testing.
+*/
+
+import (
+	"io/ioutil"
+
+	"github.com/mitchellh/multistep"
+	"github.com/mitchellh/packer/packer"
+)
+
+const BuilderId = "cbednarski.file"
+
+type Builder struct {
+	config *Config
+	runner multistep.Runner
+}
+
+// Prepare is responsible for configuring the builder and validating
+// that configuration. Any setup should be done in this method. Note that
+// NO side effects should take place in prepare, it is meant as a state
+// setup only. Calling Prepare is not necessarily followed by a Run.
+//
+// The parameters to Prepare are a set of interface{} values of the
+// configuration. These are almost always `map[string]interface{}`
+// parsed from a template, but no guarantee is made.
+// +// Each of the configuration values should merge into the final +// configuration. +// +// Prepare should return a list of warnings along with any errors +// that occured while preparing. +func (b *Builder) Prepare(...interface{}) ([]string, error) { + return nil, nil +} + +// Run is where the actual build should take place. It takes a Build and a Ui. +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + artifact := new(FileArtifact) + + ioutil.WriteFile(b.config.Filename, []byte(b.config.Contents), 0600) + + return artifact, nil +} + +// Cancel cancels a possibly running Builder. This should block until +// the builder actually cancels and cleans up after itself. +func (b *Builder) Cancel() { + b.runner.Cancel() +} diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go new file mode 100644 index 000000000..63d36a0a5 --- /dev/null +++ b/builder/file/builder_test.go @@ -0,0 +1,11 @@ +package file + +import ( + "testing" + + "github.com/mitchellh/packer/packer" +) + +func TestBuilder_implBuilder(t *testing.T) { + var _ packer.Builder = new(Builder) +} diff --git a/builder/file/config.go b/builder/file/config.go new file mode 100644 index 000000000..534428ca4 --- /dev/null +++ b/builder/file/config.go @@ -0,0 +1,48 @@ +package file + +import ( + "fmt" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Filename string `mapstructure:"filename"` + Contents string `mapstructure:"contents"` +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + warnings := []string{} + + err := config.Decode(c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) 
+ if err != nil { + return nil, warnings, err + } + + var errs *packer.MultiError + + if c.Filename == "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("filename is required")) + } + + if c.Contents == "" { + warnings = append(warnings, "contents is empty") + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, warnings, errs + } + + return c, warnings, nil +} diff --git a/builder/file/config_test.go b/builder/file/config_test.go new file mode 100644 index 000000000..061bb97e5 --- /dev/null +++ b/builder/file/config_test.go @@ -0,0 +1,35 @@ +package file + +import ( + "fmt" + "testing" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "filename": "test.txt", + "contents": "Hello, world!", + } +} + +func TestNoFilename(t *testing.T) { + raw := testConfig() + + delete(raw, "filename") + _, _, errs := NewConfig(raw) + if errs == nil { + t.Error("Expected config to error without a filename") + } +} + +func TestNoContent(t *testing.T) { + raw := testConfig() + + delete(raw, "contents") + _, warns, _ := NewConfig(raw) + fmt.Println(len(warns)) + fmt.Printf("%#v\n", warns) + if len(warns) == 0 { + t.Error("Expected config to warn without any content") + } +} From 766d217ed71f511b12a30e13efa829bcf3b05b23 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 17:34:46 -0700 Subject: [PATCH 14/50] Pull config into the builder --- builder/file/builder.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/builder/file/builder.go b/builder/file/builder.go index 89047ab75..3b00aae60 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -33,8 +33,14 @@ type Builder struct { // // Prepare should return a list of warnings along with any errors // that occured while preparing. -func (b *Builder) Prepare(...interface{}) ([]string, error) { - return nil, nil +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + c, warnings, errs := NewConfig(raws...) 
+ if errs != nil { + return warnings, errs + } + b.config = c + + return warnings, nil } // Run is where the actual build should take place. It takes a Build and a Ui. From f7d85eb49cf169f1dfdf66d20320032ff370fff6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 17:35:17 -0700 Subject: [PATCH 15/50] Add main() for file builder --- plugin/builder-file/main.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 plugin/builder-file/main.go diff --git a/plugin/builder-file/main.go b/plugin/builder-file/main.go new file mode 100644 index 000000000..54bc4f437 --- /dev/null +++ b/plugin/builder-file/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/builder/file" + "github.com/mitchellh/packer/packer/plugin" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterBuilder(new(file.Builder)) + server.Serve() +} From e60b22d48f8b557c620ff083a55263acedc19d55 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Fri, 12 Jun 2015 18:18:38 -0700 Subject: [PATCH 16/50] Changed file builder to support content or source file operation --- builder/file/builder.go | 33 ++++++++++++++++++++++++++++++++- builder/file/config.go | 20 ++++++++++++++------ builder/file/config_test.go | 22 +++++++++++++++++----- 3 files changed, 63 insertions(+), 12 deletions(-) diff --git a/builder/file/builder.go b/builder/file/builder.go index 3b00aae60..ea3206dad 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -6,7 +6,10 @@ any virutalization or network resources, it's very fast and useful for testing. 
*/ import ( + "fmt" + "io" "io/ioutil" + "os" "github.com/mitchellh/multistep" "github.com/mitchellh/packer/packer" @@ -47,7 +50,35 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { artifact := new(FileArtifact) - ioutil.WriteFile(b.config.Filename, []byte(b.config.Contents), 0600) + if b.config.Source != "" { + source, err := os.Open(b.config.Source) + defer source.Close() + if err != nil { + return nil, err + } + + target, err := os.OpenFile(b.config.Target, os.O_WRONLY, 0600) + defer target.Close() + if err != nil { + return nil, err + } + + ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name())) + bytes, err := io.Copy(source, target) + if err != nil { + return nil, err + } + ui.Say(fmt.Sprintf("Copied %d bytes", bytes)) + artifact.filename = target.Name() + } else { + // We're going to write Contents; if it's empty we'll just create an + // empty file. 
+ err := ioutil.WriteFile(b.config.Target, []byte(b.config.Content), 0600) + if err != nil { + return nil, err + } + artifact.filename = b.config.Target + } return artifact, nil } diff --git a/builder/file/config.go b/builder/file/config.go index 534428ca4..6702e6894 100644 --- a/builder/file/config.go +++ b/builder/file/config.go @@ -9,11 +9,15 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) +var ErrTargetRequired = fmt.Errorf("target required") +var ErrContentSourceConflict = fmt.Errorf("Cannot specify source file AND content") + type Config struct { common.PackerConfig `mapstructure:",squash"` - Filename string `mapstructure:"filename"` - Contents string `mapstructure:"contents"` + Source string `mapstructure:"source"` + Target string `mapstructure:"target"` + Content string `mapstructure:"content"` } func NewConfig(raws ...interface{}) (*Config, []string, error) { @@ -32,12 +36,16 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) { var errs *packer.MultiError - if c.Filename == "" { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("filename is required")) + if c.Target == "" { + errs = packer.MultiErrorAppend(errs, ErrTargetRequired) } - if c.Contents == "" { - warnings = append(warnings, "contents is empty") + if c.Content == "" && c.Source == "" { + warnings = append(warnings, "Both source file and contents are blank; target will have no content") + } + + if c.Content != "" && c.Source != "" { + errs = packer.MultiErrorAppend(errs, ErrContentSourceConflict) } if errs != nil && len(errs.Errors) > 0 { diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 061bb97e5..6d8039558 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -2,13 +2,24 @@ package file import ( "fmt" + "strings" "testing" ) func testConfig() map[string]interface{} { return map[string]interface{}{ - "filename": "test.txt", - "contents": "Hello, world!", + "source": "src.txt", + "target": "dst.txt", + "content": 
"Hello, world!", + } +} + +func TestContentSourceConflict(t *testing.T) { + raw := testConfig() + + _, _, errs := NewConfig(raw) + if !strings.Contains(errs.Error(), ErrContentSourceConflict.Error()) { + t.Errorf("Expected config error: %s", ErrContentSourceConflict.Error()) } } @@ -18,18 +29,19 @@ func TestNoFilename(t *testing.T) { delete(raw, "filename") _, _, errs := NewConfig(raw) if errs == nil { - t.Error("Expected config to error without a filename") + t.Errorf("Expected config error: %s", ErrTargetRequired.Error()) } } func TestNoContent(t *testing.T) { raw := testConfig() - delete(raw, "contents") + delete(raw, "content") + delete(raw, "source") _, warns, _ := NewConfig(raw) fmt.Println(len(warns)) fmt.Printf("%#v\n", warns) if len(warns) == 0 { - t.Error("Expected config to warn without any content") + t.Error("Expected config warning without any content") } } From d015d20a2440f8bd92ff881c064439a462b64447 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 11:53:03 -0500 Subject: [PATCH 17/50] document block device mapping fields --- .../docs/builders/amazon-ebs.html.markdown | 29 +++++++++++++++---- .../builders/amazon-instance.html.markdown | 27 +++++++++++++---- 2 files changed, 46 insertions(+), 10 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index af3ece59e..a7153a950 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -61,11 +61,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. 
The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" - (integer). + device mappings to the AMI. The block device mappings allow for keys: + + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -255,3 +270,7 @@ Here is an example using the optional AMI tags. 
This will add the tags } } ``` + + +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 3ca82731b..2c7f6e4a7 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -81,12 +81,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" (integer). - See [amazon-ebs](/docs/builders/amazon-ebs.html) for an example template. + device mappings to the AMI. The block device mappings allow for keys: + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. 
Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -318,3 +332,6 @@ sudo -i -n ec2-upload-bundle \ The available template variables should be self-explanatory based on the parameters they're used to satisfy the `ec2-upload-bundle` command. + +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html From d22c4173d3457684fd2fd1bc290cafd62f43c09a Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 11:53:21 -0500 Subject: [PATCH 18/50] fix crash when waiting for an instance that has failed --- builder/amazon/common/step_run_source_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index ec330ebc4..f88db5efc 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -245,7 +245,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } latestInstance, err := WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", *s.instance.InstanceID, err) + err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt From 
2d13db300c493589c63cc5732b0eeaa52ede6a9a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 10:26:46 -0700 Subject: [PATCH 19/50] packer: HookProvision errors if no communicator --- common/step_provision.go | 6 ++++-- packer/build_test.go | 2 +- packer/builder_mock.go | 2 +- packer/provisioner.go | 12 ++++++++++++ packer/provisioner_test.go | 24 +++++++++++++++++++++--- 5 files changed, 39 insertions(+), 7 deletions(-) diff --git a/common/step_provision.go b/common/step_provision.go index ae06f1b0c..f40cfd896 100644 --- a/common/step_provision.go +++ b/common/step_provision.go @@ -23,9 +23,11 @@ type StepProvision struct { func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { comm := s.Comm if comm == nil { - comm = state.Get("communicator").(packer.Communicator) + raw, ok := state.Get("communicator").(packer.Communicator) + if ok { + comm = raw.(packer.Communicator) + } } - hook := state.Get("hook").(packer.Hook) ui := state.Get("ui").(packer.Ui) diff --git a/packer/build_test.go b/packer/build_test.go index b183fb95a..e29318972 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -202,7 +202,7 @@ func TestBuild_Run(t *testing.T) { } // Verify provisioners run - dispatchHook.Run(HookProvision, nil, nil, 42) + dispatchHook.Run(HookProvision, nil, new(MockCommunicator), 42) prov := build.provisioners[0].provisioner.(*MockProvisioner) if !prov.ProvCalled { t.Fatal("should be called") diff --git a/packer/builder_mock.go b/packer/builder_mock.go index 9cb016963..d8fd98e13 100644 --- a/packer/builder_mock.go +++ b/packer/builder_mock.go @@ -43,7 +43,7 @@ func (tb *MockBuilder) Run(ui Ui, h Hook, c Cache) (Artifact, error) { } if h != nil { - if err := h.Run(HookProvision, ui, nil, nil); err != nil { + if err := h.Run(HookProvision, ui, new(MockCommunicator), nil); err != nil { return nil, err } } diff --git a/packer/provisioner.go b/packer/provisioner.go index d28d1371a..f4f3fce11 100644 --- a/packer/provisioner.go 
+++ b/packer/provisioner.go @@ -38,6 +38,18 @@ type ProvisionHook struct { // Runs the provisioners in order. func (h *ProvisionHook) Run(name string, ui Ui, comm Communicator, data interface{}) error { + // Shortcut + if len(h.Provisioners) == 0 { + return nil + } + + if comm == nil { + return fmt.Errorf( + "No communicator found for provisioners! This is usually because the\n" + + "`communicator` config was set to \"none\". If you have any provisioners\n" + + "then a communicator is required. Please fix this to continue.") + } + defer func() { h.lock.Lock() defer h.lock.Unlock() diff --git a/packer/provisioner_test.go b/packer/provisioner_test.go index 5eeebb4a3..7251d6f05 100644 --- a/packer/provisioner_test.go +++ b/packer/provisioner_test.go @@ -19,7 +19,7 @@ func TestProvisionHook(t *testing.T) { pB := &MockProvisioner{} ui := testUi() - var comm Communicator = nil + var comm Communicator = new(MockCommunicator) var data interface{} = nil hook := &ProvisionHook{ @@ -37,6 +37,24 @@ func TestProvisionHook(t *testing.T) { } } +func TestProvisionHook_nilComm(t *testing.T) { + pA := &MockProvisioner{} + pB := &MockProvisioner{} + + ui := testUi() + var comm Communicator = nil + var data interface{} = nil + + hook := &ProvisionHook{ + Provisioners: []Provisioner{pA, pB}, + } + + err := hook.Run("foo", ui, comm, data) + if err == nil { + t.Fatal("should error") + } +} + func TestProvisionHook_cancel(t *testing.T) { var lock sync.Mutex order := make([]string, 0, 2) @@ -59,7 +77,7 @@ func TestProvisionHook_cancel(t *testing.T) { finished := make(chan struct{}) go func() { - hook.Run("foo", nil, nil, nil) + hook.Run("foo", nil, new(MockCommunicator), nil) close(finished) }() @@ -74,7 +92,7 @@ func TestProvisionHook_cancel(t *testing.T) { <-finished // Verify order - if order[0] != "cancel" || order[1] != "prov" { + if len(order) != 2 || order[0] != "cancel" || order[1] != "prov" { t.Fatalf("bad: %#v", order) } } From d393bb5112241c75ecd8da878399ba62038d0d75 Mon Sep 17 
00:00:00 2001 From: Chris Bednarski Date: Mon, 15 Jun 2015 10:30:45 -0700 Subject: [PATCH 20/50] make updatedeps will actually update now --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0574cbb5c..9abc16995 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: - go get -d -v -p 2 ./... + go get -u -d -v -p 2 ./... vet: @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ From 667c53942b9d7a547928002c417e1948ca0dfe60 Mon Sep 17 00:00:00 2001 From: Marc Siegfriedt Date: Mon, 15 Jun 2015 12:40:34 -0700 Subject: [PATCH 21/50] use template for additional disks --- builder/vmware/iso/builder.go | 29 ++++++++++++++------------- builder/vmware/iso/step_create_vmx.go | 26 ++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 16 deletions(-) mode change 100644 => 100755 builder/vmware/iso/builder.go mode change 100644 => 100755 builder/vmware/iso/step_create_vmx.go diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go old mode 100644 new mode 100755 index 38ba3a4a1..fa8deb983 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -36,20 +36,21 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` - AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` - DiskName string `mapstructure:"vmdk_name"` - DiskSize uint `mapstructure:"disk_size"` - DiskTypeId string `mapstructure:"disk_type_id"` - FloppyFiles []string `mapstructure:"floppy_files"` - GuestOSType string `mapstructure:"guest_os_type"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` - Version string `mapstructure:"version"` - VMName string `mapstructure:"vm_name"` - BootCommand []string `mapstructure:"boot_command"` - SkipCompaction bool `mapstructure:"skip_compaction"` - VMXTemplatePath 
string `mapstructure:"vmx_template_path"` + AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskName string `mapstructure:"vmdk_name"` + DiskSize uint `mapstructure:"disk_size"` + DiskTypeId string `mapstructure:"disk_type_id"` + FloppyFiles []string `mapstructure:"floppy_files"` + GuestOSType string `mapstructure:"guest_os_type"` + ISOChecksum string `mapstructure:"iso_checksum"` + ISOChecksumType string `mapstructure:"iso_checksum_type"` + ISOUrls []string `mapstructure:"iso_urls"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` + BootCommand []string `mapstructure:"boot_command"` + SkipCompaction bool `mapstructure:"skip_compaction"` + VMXTemplatePath string `mapstructure:"vmx_template_path"` + VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` RemoteType string `mapstructure:"remote_type"` RemoteDatastore string `mapstructure:"remote_datastore"` diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go old mode 100644 new mode 100755 index 69cb3f261..272721893 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -76,7 +76,29 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { DiskName: config.DiskName, } - diskTemplate, err := interpolate.Render(DefaultAdditionalDiskTemplate, &ctx) + diskTemplate := DefaultAdditionalDiskTemplate + if config.VMXDiskTemplatePath != "" { + f, err := os.Open(config.VMXDiskTemplatePath) + if err != nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + defer f.Close() + + rawBytes, err := ioutil.ReadAll(f) + if err != nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + diskTemplate = string(rawBytes) + } + + diskContents, err := interpolate.Render(diskTemplate, 
&ctx) if err != nil { err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err) state.Put("error", err) @@ -84,7 +106,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - vmxTemplate += diskTemplate + vmxTemplate += diskContents } } From 106c9403ed3d21ed16e03551b6a7783f42538180 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:37:20 -0700 Subject: [PATCH 22/50] provisioner/chef-client: chmod the directories --- provisioner/chef-client/provisioner.go | 21 +++++++++++++------ .../provisioners/chef-client.html.markdown | 9 ++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index b28c9e83a..527b375c1 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -310,16 +310,25 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri mkdirCmd = "sudo " + mkdirCmd } - cmd := &packer.RemoteCmd{ - Command: mkdirCmd, - } - + cmd := &packer.RemoteCmd{Command: mkdirCmd} if err := cmd.StartWithUi(comm, ui); err != nil { return err } - if cmd.ExitStatus != 0 { - return fmt.Errorf("Non-zero exit status.") + return fmt.Errorf("Non-zero exit status. See output above for more info.") + } + + // Chmod the directory to 0777 just so that we can access it as our user + mkdirCmd = fmt.Sprintf("chmod 0777 '%s'", dir) + if !p.config.PreventSudo { + mkdirCmd = "sudo " + mkdirCmd + } + cmd = &packer.RemoteCmd{Command: mkdirCmd} + if err := cmd.StartWithUi(comm, ui); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status. 
See output above for more info.") } return nil diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index eaeadbf45..22e965149 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -161,3 +161,12 @@ curl -L https://www.opscode.com/chef/install.sh | \ ``` This command can be customized using the `install_command` configuration. + +## Folder Permissions + +The `chef-client` provisioner will chmod the directory with your Chef +keys to 777. This is to ensure that Packer can upload and make use of that +directory. However, once the machine is created, you usually don't +want to keep these directories with those permissions. To change the +permissions on the directories, append a shell provisioner after Chef +to modify them. From 906c45266d96e1cdbbb03853c8c5c43b48e93800 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:46:43 -0700 Subject: [PATCH 23/50] website: make warning for chef perms --- website/source/docs/provisioners/chef-client.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index 22e965149..9a2a11379 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -164,7 +164,7 @@ This command can be customized using the `install_command` configuration. ## Folder Permissions -The `chef-client` provisioner will chmod the directory with your Chef +!> The `chef-client` provisioner will chmod the directory with your Chef keys to 777. This is to ensure that Packer can upload and make use of that directory. However, once the machine is created, you usually don't want to keep these directories with those permissions. 
To change the From 6f7818980ce2ec4dfa575295dae54a9643e67213 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:50:01 -0700 Subject: [PATCH 24/50] Update updatedeps --- Makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9abc16995..884d6bbf2 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,13 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: - go get -u -d -v -p 2 ./... + go get -u github.com/mitchellh/gox + go get -u golang.org/x/tools/cmd/stringer + go list ./... \ + | xargs go list -f '{{join .Deps "\n"}}' \ + | grep -v github.com/mitchellh/packer \ + | sort -u \ + | xargs go get -f -u -v vet: @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ From 86539398ab22a042fe6ad531b9ea883bc475ef60 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 13:52:06 -0700 Subject: [PATCH 25/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ac36c6fe5..11566cfea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -121,6 +121,7 @@ BUG FIXES: * post-processor/atlas: Fix index out of range panic [GH-1959] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] + * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] From 91e565d54f08c98cc2d4859f7f828f8d8a1f6b4b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 15 Jun 2015 16:02:26 -0500 Subject: [PATCH 26/50] builder/amazon: Update docs on ssh_private_key --- website/source/docs/builders/amazon-ebs.html.markdown | 3 ++- website/source/docs/builders/amazon-instance.html.markdown | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git 
a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index a7153a950..0f7a46186 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -152,7 +152,8 @@ AMI if one with the same name already exists. Default `false`. to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 2c7f6e4a7..326706e69 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -191,7 +191,8 @@ AMI if one with the same name already exists. Default `false`. to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. 
From 14787fd4cc5332107c5757523cf2229fe9fa8465 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:29:12 -0700 Subject: [PATCH 27/50] provisioner/chef-client: run cleanup on node [GH-1295] --- provisioner/chef-client/provisioner.go | 42 +++++++++++++++++--------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 527b375c1..20b594562 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -9,7 +9,6 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "strings" @@ -336,15 +335,9 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef node...") - app := fmt.Sprintf("knife node delete %s -y", node) - - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() - - ui.Message(fmt.Sprintf("%s", out)) - - if err != nil { - return err + args := []string{"node", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup node: %s", err) } return nil @@ -352,16 +345,35 @@ func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node str func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef client...") - app := fmt.Sprintf("knife client delete %s -y", node) + args := []string{"client", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup client: %s", err) + } - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() + return nil +} - ui.Message(fmt.Sprintf("%s", out)) +func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, args []string) error { + flags := []string{ + "-y", + "-s", fmt.Sprintf("'%s'", p.config.ServerUrl), + 
"-k", fmt.Sprintf("'%s'", p.config.ClientKey), + "-u", fmt.Sprintf("'%s'", node), + } - if err != nil { + cmdText := fmt.Sprintf( + "knife %s %s", strings.Join(args, " "), strings.Join(flags, " ")) + if !p.config.PreventSudo { + cmdText = "sudo " + cmdText + } + + cmd := &packer.RemoteCmd{Command: cmdText} + if err := cmd.StartWithUi(comm, ui); err != nil { return err } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status. See output above for more info.") + } return nil } From 711dfc9d0ad5b8b143290a4e71b5f7a5fa5af8c9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:35:54 -0700 Subject: [PATCH 28/50] provisioner/chef: show command in output --- provisioner/chef-client/provisioner.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index 20b594562..2d42d361d 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -372,7 +372,10 @@ func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node str return err } if cmd.ExitStatus != 0 { - return fmt.Errorf("Non-zero exit status. See output above for more info.") + return fmt.Errorf( + "Non-zero exit status. See output above for more info.\n\n"+ + "Command: %s", + cmdText) } return nil From 753ad76e2bfe011d3199632bdc4e6b2fb52b5d66 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:37:00 -0700 Subject: [PATCH 29/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11566cfea..96fd4c2f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ BUG FIXES: * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] + * provisioner/chef-client: Node cleanup works now. 
[GH-2257] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] From 452421b8bc7892c5865eb3f93719287bb62fb3bd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 14:40:53 -0700 Subject: [PATCH 30/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 96fd4c2f9..7732e566d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ IMPROVEMENTS: automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support * builder/vmware: Support for additional disks [GH-1382] + * builder/vmware: Can now customize the template used for adding disks [GH-2254] * command/fix: After fixing, the template is validated [GH-2228] * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] From a235419c7d41c5a63e2c132cab6533f1cfc0b72f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:02:59 -0700 Subject: [PATCH 31/50] provisioner/shell: remove file after exec [GH-1536] --- provisioner/shell/provisioner.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index baedd645a..28c1a2e06 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -266,12 +266,24 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return err } - // Close the original file since we copied it - f.Close() - if cmd.ExitStatus != 0 { return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } + + // Delete the temporary file we created + cmd = &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -f %s", p.config.RemotePath), + } 
+ if err := comm.Start(cmd); err != nil { + return fmt.Errorf( + "Error removing temporary script at %s: %s", + p.config.RemotePath, err) + } + cmd.Wait() + if cmd.ExitStatus != 0 { + return fmt.Errorf( + "Error removing temporary script at %s!") + } } return nil From 8ecca2aa54dbee4b0ccd36280493b3d944ec6136 Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:24:31 +0300 Subject: [PATCH 32/50] implement ssh.Download() --- communicator/ssh/communicator.go | 55 ++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 8fd9ba91e..46dd22e2d 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -14,6 +14,7 @@ import ( "net" "os" "path/filepath" + "strconv" "sync" ) @@ -171,8 +172,58 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error { return c.scpSession("scp -rvt "+dst, scpFunc) } -func (c *comm) Download(string, io.Writer) error { - panic("not implemented yet") +func (c *comm) Download(path string, output io.Writer) error { + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + fmt.Fprint(w, "\x00") + + // read file info + fi, err := stdoutR.ReadString( '\n') + if err != nil { + return err + } + + if len(fi) < 0 { + return fmt.Errorf("empty response from server") + } + + switch fi[0] { + case '\x01', '\x02': + return fmt.Errorf("%s", fi[1:len(fi)]) + case 'C': + case 'D': + return fmt.Errorf("remote file is directory") + default: + return fmt.Errorf("unexpected server response (%x)", fi[0]) + } + + var mode string + var size int64 + + n, err := fmt.Sscanf(fi, "%6s %d ", &mode, &size) + if err != nil || n != 2 { + return fmt.Errorf("can't parse server response (%s)", fi) + } + if size < 0 { + return fmt.Errorf("negative file size") + } + + fmt.Fprint(w, "\x00") + + if _, err := io.CopyN(output, stdoutR, size); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + + if 
err := checkSCPStatus(stdoutR); err != nil { + return err + } + + return nil + } + + return c.scpSession("scp -vf "+strconv.Quote(path), scpFunc) } func (c *comm) newSession() (session *ssh.Session, err error) { From 500d83b673ca29f2189c9ab9c1470ddb1d13a9ef Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:25:27 +0300 Subject: [PATCH 33/50] add download support to file provisioner --- provisioner/file/provisioner.go | 43 +++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index ce359a407..9bc2a646c 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -20,6 +20,9 @@ type Config struct { // The remote path where the local file will be uploaded to. Destination string + // Direction + Direction string + ctx interpolate.Context } @@ -38,12 +41,28 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return err } + if p.config.Direction == "" { + p.config.Direction = "upload" + } + var errs *packer.MultiError if _, err := os.Stat(p.config.Source); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) } + if p.config.Direction != "download" && p.config.Direction != "upload" { + errs = packer.MultiErrorAppend(errs, + errors.New("Direction must be one of: download, upload.")) + } + + if p.config.Direction == "upload" { + if _, err := os.Stat(p.config.Source); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) + } + } + if p.config.Destination == "" { errs = packer.MultiErrorAppend(errs, errors.New("Destination must be specified.")) @@ -57,6 +76,30 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + if p.config.Direction == "download" { + return p.ProvisionDownload(ui, comm) + } else { + return p.ProvisionUpload(ui, 
comm) + } +} + +func (p *Provisioner) ProvisionDownload(ui packer.Ui, comm packer.Communicator) error { + ui.Say(fmt.Sprintf("Downloading %s => %s", p.config.Source, p.config.Destination)) + + f, err := os.OpenFile(p.config.Destination, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + + err = comm.Download(p.config.Source, f) + if err != nil { + ui.Error(fmt.Sprintf("Download failed: %s", err)) + } + return err +} + +func (p *Provisioner) ProvisionUpload(ui packer.Ui, comm packer.Communicator) error { ui.Say(fmt.Sprintf("Uploading %s => %s", p.config.Source, p.config.Destination)) info, err := os.Stat(p.config.Source) if err != nil { From 15f40a3d004749425c2f40a9be007468970dec1e Mon Sep 17 00:00:00 2001 From: Alexander Golovko Date: Mon, 9 Feb 2015 04:48:53 +0300 Subject: [PATCH 34/50] fix disabling vmware tools for ESX --- builder/vmware/common/step_upload_tools.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/vmware/common/step_upload_tools.go b/builder/vmware/common/step_upload_tools.go index aa7dd08e7..3f7214965 100644 --- a/builder/vmware/common/step_upload_tools.go +++ b/builder/vmware/common/step_upload_tools.go @@ -23,6 +23,10 @@ type StepUploadTools struct { func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) + if c.ToolsUploadFlavor == "" { + return multistep.ActionContinue + } + if c.RemoteType == "esx5" { if err := driver.ToolsInstall(); err != nil { state.Put("error", fmt.Errorf("Couldn't mount VMware tools ISO. 
Please check the 'guest_os_type' in your template.json.")) @@ -30,10 +34,6 @@ func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionContinue } - if c.ToolsUploadFlavor == "" { - return multistep.ActionContinue - } - comm := state.Get("communicator").(packer.Communicator) tools_source := state.Get("tools_upload_source").(string) ui := state.Get("ui").(packer.Ui) From 686d4413ecb7fb9c2e1ccc2a6c08061624d2111a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:07:20 -0700 Subject: [PATCH 35/50] communicator/winrm: error if download --- communicator/ssh/communicator.go | 3 +-- communicator/winrm/communicator.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 46dd22e2d..2cc299b30 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -173,12 +173,11 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error { } func (c *comm) Download(path string, output io.Writer) error { - scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { fmt.Fprint(w, "\x00") // read file info - fi, err := stdoutR.ReadString( '\n') + fi, err := stdoutR.ReadString('\n') if err != nil { return err } diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 82686e2a7..2b53ac62c 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -113,7 +113,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error } func (c *Communicator) Download(src string, dst io.Writer) error { - panic("download not implemented") + return fmt.Errorf("WinRM doesn't support download.") } func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { From aee48239f74433cd842e0a4a64557915dc95f1dc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:08:04 -0700 Subject: [PATCH 36/50] website: 
document file download --- website/source/docs/provisioners/file.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 68034fe00..19fcce9be 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -40,6 +40,10 @@ The available configuration options are listed below. All elements are required. machine. This value must be a writable location and any parent directories must already exist. +* `direction` (string) - The direction of the file transfer. This defaults + to "upload." If it is set to "download" then the file "source" in + the machine will be downloaded locally to "destination" + ## Directory Uploads The file provisioner is also able to upload a complete directory to the From 13346ba648adb797206750ee8a147467c56d8b82 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:10:15 -0700 Subject: [PATCH 37/50] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7732e566d..340fc9872 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,8 @@ FEATURES: to allow access to remote servers such as private git repos. [GH-1066] * **Docker builder supports SSH**: The Docker builder now supports containers with SSH, just set `communicator` to "ssh" [GH-2244] + * **File provisioner can download**: The file provisioner can now download + files out of the build process. [GH-1909] * **New config function: `build_name`**: The name of the currently running build. 
[GH-2232] * **New config function: `build_type`**: The type of the currently running From 6c802286614a1127de545056a270199eef5f1aaf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:11:08 -0700 Subject: [PATCH 38/50] provisioner/shell: missing error arg --- provisioner/shell/provisioner.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index 28c1a2e06..338092755 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -282,7 +282,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { cmd.Wait() if cmd.ExitStatus != 0 { return fmt.Errorf( - "Error removing temporary script at %s!") + "Error removing temporary script at %s!", + p.config.RemotePath) } } From 8dfd553e86674c2c082dc29dd3b13dcfc7d175b6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:16:17 -0700 Subject: [PATCH 39/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 340fc9872..8744e4a1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,7 @@ BUG FIXES: * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] * provisioner/shell: single quotes in env vars are escaped [GH-2229] + * provisioner/shell: Temporary file is deleted after run [GH-2259] ## 0.7.5 (December 9, 2014) From b15a77660a1c0d25f3368e6d5ada6cc05e1efd99 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:18:21 -0700 Subject: [PATCH 40/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8744e4a1d..628d56d94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ IMPROVEMENTS: * post-processor/docker-save: Can be chained [GH-2179] * post-processor/docker-tag: Support `force` 
option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] + * provisioner/puppet-masterless: `working_directory` option [GH-1831] BUG FIXES: From 2bb4bdffc4c1ef012fb279ab163b9221c8318b28 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:23:38 -0700 Subject: [PATCH 41/50] website: update docs for ssh_keypair_name --- .../docs/builders/amazon-ebs.html.markdown | 27 ++++++++++------- .../builders/amazon-instance.html.markdown | 29 +++++++++++-------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index 0f7a46186..6c7840575 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -61,24 +61,24 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on [Block Device Mapping][1] for more information - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - `volume_size` (integer) – The size of the volume, in GiB. 
Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is deleted on instance termination - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the - block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information @@ -148,11 +148,16 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must + a generated ssh key pair for connecting to the instance. 
This key file must already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 326706e69..ff9e7c9a2 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -81,26 +81,26 @@ each category, the available configuration keys are alphabetized. ### Optional: * `ami_block_device_mappings` (array of block device mappings) - Add the block - device mappings to the AMI. The block device mappings allow for keys: + device mappings to the AMI. The block device mappings allow for keys: - - `device_name` (string) – The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") - - `virtual_name` (string) – The virtual device name. See the documentation on + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on [Block Device Mapping][1] for more information - `snapshot_id` (string) – The ID of the snapshot - - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) - volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes - `volume_size` (integer) – The size of the volume, in GiB. 
Required if not specifying a `snapshot_id` - - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is deleted on instance termination - `encrypted` (boolean) – Indicates whether to encrypt the volume or not - - `no_device` (boolean) – Suppresses the specified device included in the - block device mapping of the AMI - - `iops` (integer) – The number of I/O operations per second (IOPS) that the + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs][2] for more information - + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -187,11 +187,16 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. This key file must + a generated ssh key pair for connecting to the instance. 
This key file must already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private From 3bae1d24b3193e81a265700284645de7945b79ab Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:24:03 -0700 Subject: [PATCH 42/50] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 628d56d94..f57206328 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ IMPROVEMENTS: * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] + * builder/amazon: Support custom keypairs [GH-1837] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] From 4258a4102933ecd3acfafd1389e129ebc4fb3d03 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 15 Jun 2015 15:30:00 -0700 Subject: [PATCH 43/50] update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f57206328..38718e928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,8 @@ IMPROVEMENTS: * post-processor/docker-tag: Support `force` option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] * provisioner/puppet-masterless: `working_directory` option [GH-1831] + * provisioner/puppet-masterless: `packer_build_name` and + `packer_build_type` are default facts. 
[GH-1878] BUG FIXES: From 29f02d243f2c16be24be9d3f6d1c13d9343aa7ac Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Mon, 15 Jun 2015 18:56:09 -0700 Subject: [PATCH 44/50] Had io.Copy args swapped; also use os.Create instead of os.OpenFile for MAGIC --- builder/file/builder.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/file/builder.go b/builder/file/builder.go index ea3206dad..9297b456d 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -57,14 +57,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - target, err := os.OpenFile(b.config.Target, os.O_WRONLY, 0600) + // Create will truncate an existing file + target, err := os.Create(b.config.Target) defer target.Close() if err != nil { return nil, err } ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name())) - bytes, err := io.Copy(source, target) + bytes, err := io.Copy(target, source) if err != nil { return nil, err } From 97e94eda7795b15d90b4c136302d6239c772eb53 Mon Sep 17 00:00:00 2001 From: Kerim Satirli Date: Tue, 16 Jun 2015 09:18:59 +0200 Subject: [PATCH 45/50] adds missing comma The `parallels_tools_flavor` key-value pair is missing a trailing comma, thereby making it invalid JSON. --- website/source/docs/builders/parallels-iso.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index ed7ebd86c..b84123f8b 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -32,7 +32,7 @@ Ubuntu to self-install. 
Still, the example serves to show the basic configuratio "iso_url": "http://releases.ubuntu.com/12.04/ubuntu-12.04.3-server-amd64.iso", "iso_checksum": "2cbe868812a871242cdcdd8f2fd6feb9", "iso_checksum_type": "md5", - "parallels_tools_flavor": "lin" + "parallels_tools_flavor": "lin", "ssh_username": "packer", "ssh_password": "packer", "ssh_wait_timeout": "30s", From fe0c548619b2288c3a196369ed0b27ff21eb27e0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 11:30:49 -0700 Subject: [PATCH 46/50] Added acceptance test for file builder --- builder/file/builder.go | 16 +---- builder/file/builder_test.go | 67 +++++++++++++++++++ builder/file/config_test.go | 4 +- builder/file/test-fixtures/artifact.txt | 1 + .../compress/post-processor_test.go | 21 +++++- 5 files changed, 90 insertions(+), 19 deletions(-) create mode 100644 builder/file/test-fixtures/artifact.txt diff --git a/builder/file/builder.go b/builder/file/builder.go index 9297b456d..9a2c2cc7f 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -15,27 +15,13 @@ import ( "github.com/mitchellh/packer/packer" ) -const BuilderId = "cbednarski.file" +const BuilderId = "packer.file" type Builder struct { config *Config runner multistep.Runner } -// Prepare is responsible for configuring the builder and validating -// that configuration. Any setup should be done in this method. Note that -// NO side effects should take place in prepare, it is meant as a state -// setup only. Calling Prepare is not necessarilly followed by a Run. -// -// The parameters to Prepare are a set of interface{} values of the -// configuration. These are almost always `map[string]interface{}` -// parsed from a template, but no guarantee is made. -// -// Each of the configuration values should merge into the final -// configuration. -// -// Prepare should return a list of warnings along with any errors -// that occured while preparing. 
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { c, warnings, errs := NewConfig(raws...) if errs != nil { diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go index 63d36a0a5..3ce9e77ae 100644 --- a/builder/file/builder_test.go +++ b/builder/file/builder_test.go @@ -1,11 +1,78 @@ package file import ( + "fmt" + "io/ioutil" "testing" + builderT "github.com/mitchellh/packer/helper/builder/testing" "github.com/mitchellh/packer/packer" ) func TestBuilder_implBuilder(t *testing.T) { var _ packer.Builder = new(Builder) } + +func TestBuilderFileAcc_content(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileContentTest, + Check: checkContent, + }) +} + +func TestBuilderFileAcc_copy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileCopyTest, + Check: checkCopy, + }) +} + +func checkContent(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("contentTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "hello world!" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +func checkCopy(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("copyTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "Hello world.\n" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +const fileContentTest = ` +{ + "builders": [ + { + "type":"test", + "target":"contentTest.txt", + "content":"hello world!" 
+ } + ] +} +` + +const fileCopyTest = ` +{ + "builders": [ + { + "type":"test", + "target":"copyTest.txt", + "source":"test-fixtures/artifact.txt" + } + ] +} +` diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 6d8039558..9d8f346fc 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -1,7 +1,6 @@ package file import ( - "fmt" "strings" "testing" ) @@ -39,8 +38,7 @@ func TestNoContent(t *testing.T) { delete(raw, "content") delete(raw, "source") _, warns, _ := NewConfig(raw) - fmt.Println(len(warns)) - fmt.Printf("%#v\n", warns) + if len(warns) == 0 { t.Error("Expected config warning without any content") } diff --git a/builder/file/test-fixtures/artifact.txt b/builder/file/test-fixtures/artifact.txt new file mode 100644 index 000000000..18249f335 --- /dev/null +++ b/builder/file/test-fixtures/artifact.txt @@ -0,0 +1 @@ +Hello world. diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 92cbfc4b3..5f4d6b9ca 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,3 +1,22 @@ package compress -import () +// import ( +// "testing" +// +// builderT "github.com/mitchellh/packer/helper/builder/testing" +// ) +// +// func TestBuilderTagsAcc_basic(t *testing.T) { +// builderT.Test(t, builderT.TestCase{ +// Builder: &Builder{}, +// Template: simpleTestCase, +// Check: checkTags(), +// }) +// } + +const simpleTestCase = ` +{ + "type": "compress", + "output": "foo.tar.gz" +} +` From aea70d5a720b9cc415bd57f46707a0cb4e4193dd Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 11:31:53 -0700 Subject: [PATCH 47/50] Added acceptance test for file builder --- builder/file/builder.go | 16 +----- builder/file/builder_test.go | 67 +++++++++++++++++++++++++ builder/file/config_test.go | 4 +- builder/file/test-fixtures/artifact.txt | 1 + 4 files changed, 70 insertions(+), 18 deletions(-) create mode 
100644 builder/file/test-fixtures/artifact.txt diff --git a/builder/file/builder.go b/builder/file/builder.go index 9297b456d..9a2c2cc7f 100644 --- a/builder/file/builder.go +++ b/builder/file/builder.go @@ -15,27 +15,13 @@ import ( "github.com/mitchellh/packer/packer" ) -const BuilderId = "cbednarski.file" +const BuilderId = "packer.file" type Builder struct { config *Config runner multistep.Runner } -// Prepare is responsible for configuring the builder and validating -// that configuration. Any setup should be done in this method. Note that -// NO side effects should take place in prepare, it is meant as a state -// setup only. Calling Prepare is not necessarilly followed by a Run. -// -// The parameters to Prepare are a set of interface{} values of the -// configuration. These are almost always `map[string]interface{}` -// parsed from a template, but no guarantee is made. -// -// Each of the configuration values should merge into the final -// configuration. -// -// Prepare should return a list of warnings along with any errors -// that occured while preparing. func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { c, warnings, errs := NewConfig(raws...) 
if errs != nil { diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go index 63d36a0a5..3ce9e77ae 100644 --- a/builder/file/builder_test.go +++ b/builder/file/builder_test.go @@ -1,11 +1,78 @@ package file import ( + "fmt" + "io/ioutil" "testing" + builderT "github.com/mitchellh/packer/helper/builder/testing" "github.com/mitchellh/packer/packer" ) func TestBuilder_implBuilder(t *testing.T) { var _ packer.Builder = new(Builder) } + +func TestBuilderFileAcc_content(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileContentTest, + Check: checkContent, + }) +} + +func TestBuilderFileAcc_copy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileCopyTest, + Check: checkCopy, + }) +} + +func checkContent(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("contentTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "hello world!" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +func checkCopy(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("copyTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "Hello world.\n" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +const fileContentTest = ` +{ + "builders": [ + { + "type":"test", + "target":"contentTest.txt", + "content":"hello world!" 
+ } + ] +} +` + +const fileCopyTest = ` +{ + "builders": [ + { + "type":"test", + "target":"copyTest.txt", + "source":"test-fixtures/artifact.txt" + } + ] +} +` diff --git a/builder/file/config_test.go b/builder/file/config_test.go index 6d8039558..9d8f346fc 100644 --- a/builder/file/config_test.go +++ b/builder/file/config_test.go @@ -1,7 +1,6 @@ package file import ( - "fmt" "strings" "testing" ) @@ -39,8 +38,7 @@ func TestNoContent(t *testing.T) { delete(raw, "content") delete(raw, "source") _, warns, _ := NewConfig(raw) - fmt.Println(len(warns)) - fmt.Printf("%#v\n", warns) + if len(warns) == 0 { t.Error("Expected config warning without any content") } diff --git a/builder/file/test-fixtures/artifact.txt b/builder/file/test-fixtures/artifact.txt new file mode 100644 index 000000000..18249f335 --- /dev/null +++ b/builder/file/test-fixtures/artifact.txt @@ -0,0 +1 @@ +Hello world. From 12cf6650a075feab94e5f6695734c2ab4700d7b0 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 12:10:28 -0700 Subject: [PATCH 48/50] Revert compress post-processor to master to get baseline test --- post-processor/compress/artifact.go | 30 +- post-processor/compress/post-processor.go | 405 +++------------------- 2 files changed, 61 insertions(+), 374 deletions(-) diff --git a/post-processor/compress/artifact.go b/post-processor/compress/artifact.go index 054d501d1..34a7ce8d6 100644 --- a/post-processor/compress/artifact.go +++ b/post-processor/compress/artifact.go @@ -8,31 +8,37 @@ import ( const BuilderId = "packer.post-processor.compress" type Artifact struct { - builderId string - dir string - f []string + Path string + Provider string } -func (a *Artifact) BuilderId() string { +func NewArtifact(provider, path string) *Artifact { + return &Artifact{ + Path: path, + Provider: provider, + } +} + +func (*Artifact) BuilderId() string { return BuilderId } -func (a *Artifact) Files() []string { - return a.f +func (self *Artifact) Id() string { + return "" } -func 
(*Artifact) Id() string { - return "COMPRESS" +func (self *Artifact) Files() []string { + return []string{self.Path} } -func (a *Artifact) String() string { - return fmt.Sprintf("VM compressed files in directory: %s", a.dir) +func (self *Artifact) String() string { + return fmt.Sprintf("'%s' compressing: %s", self.Provider, self.Path) } func (*Artifact) State(name string) interface{} { return nil } -func (a *Artifact) Destroy() error { - return os.RemoveAll(a.dir) +func (self *Artifact) Destroy() error { + return os.Remove(self.Path) } diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 6d28e7c0e..ccf300946 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -2,416 +2,97 @@ package compress import ( "archive/tar" - "archive/zip" - "compress/flate" "compress/gzip" "fmt" "io" "os" - "path/filepath" - "runtime" - "strings" - "time" - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" - "github.com/pierrec/lz4" - "gopkg.in/yaml.v2" ) -type Metadata map[string]Metaitem - -type Metaitem struct { - CompSize int64 `yaml:"compsize"` - OrigSize int64 `yaml:"origsize"` - CompType string `yaml:"comptype"` - CompDate string `yaml:"compdate"` -} - type Config struct { common.PackerConfig `mapstructure:",squash"` - OutputPath string `mapstructure:"output"` - OutputFile string `mapstructure:"file"` - Compression int `mapstructure:"compression"` - Metadata bool `mapstructure:"metadata"` - NumCPU int `mapstructure:"numcpu"` - Format string `mapstructure:"format"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - ctx *interpolate.Context + OutputPath string `mapstructure:"output"` + + ctx interpolate.Context } type PostProcessor struct { - cfg Config + config Config } -func (p 
*PostProcessor) Configure(raws ...interface{}) error { - p.cfg.Compression = -1 - err := config.Decode(&p.cfg, &config.DecodeOpts{ +func (self *PostProcessor) Configure(raws ...interface{}) error { + err := config.Decode(&self.config, &config.DecodeOpts{ Interpolate: true, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - // TODO figure out if something needs to go here. - }, + Exclude: []string{}, }, }, raws...) - - errs := new(packer.MultiError) - - if p.cfg.OutputPath == "" { - p.cfg.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" - } - - if err = interpolate.Validate(p.cfg.OutputPath, p.cfg.ctx); err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error parsing target template: %s", err)) - } - - templates := map[string]*string{ - "output": &p.cfg.OutputPath, - } - - if p.cfg.Compression > flate.BestCompression { - p.cfg.Compression = flate.BestCompression - } - if p.cfg.Compression == -1 { - p.cfg.Compression = flate.DefaultCompression - } - - if p.cfg.NumCPU < 1 { - p.cfg.NumCPU = runtime.NumCPU() - } - - runtime.GOMAXPROCS(p.cfg.NumCPU) - - for key, ptr := range templates { - if *ptr == "" { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("%s must be set", key)) - } - - *ptr, err = interpolate.Render(p.cfg.OutputPath, p.cfg.ctx) - if err != nil { - errs = packer.MultiErrorAppend( - errs, fmt.Errorf("Error processing %s: %s", key, err)) - } - } - - if len(errs.Errors) > 0 { - return errs + if err != nil { + return err } return nil } -func (p *PostProcessor) fillMetadata(metadata Metadata, files []string) Metadata { - // layout shows by example how the reference time should be represented. 
- const layout = "2006-01-02_15-04-05" - t := time.Now() +func (self *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + ui.Say(fmt.Sprintf("Creating archive for '%s'", artifact.BuilderId())) - if !p.cfg.Metadata { - return metadata - } - for _, f := range files { - if fi, err := os.Stat(f); err != nil { - continue - } else { - if i, ok := metadata[filepath.Base(f)]; !ok { - metadata[filepath.Base(f)] = Metaitem{CompType: p.cfg.Format, OrigSize: fi.Size(), CompDate: t.Format(layout)} - } else { - i.CompSize = fi.Size() - i.CompDate = t.Format(layout) - metadata[filepath.Base(f)] = i - } - } - } - return metadata -} - -func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { - newartifact := &Artifact{builderId: artifact.BuilderId(), dir: p.cfg.OutputPath} - metafile := filepath.Join(p.cfg.OutputPath, "metadata") - - _, err := os.Stat(newartifact.dir) - if err == nil { - return nil, false, fmt.Errorf("output dir must not exists: %s", err) - } - err = os.MkdirAll(newartifact.dir, 0755) + // Create the compressed archive file at the appropriate OutputPath. 
+ fw, err := os.Create(self.config.OutputPath) if err != nil { - return nil, false, fmt.Errorf("failed to create output: %s", err) - } - - formats := strings.Split(p.cfg.Format, ".") - files := artifact.Files() - - metadata := make(Metadata, 0) - metadata = p.fillMetadata(metadata, files) - - for _, compress := range formats { - switch compress { - case "tar": - files, err = p.cmpTAR(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "zip": - files, err = p.cmpZIP(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - case "pgzip": - files, err = p.cmpPGZIP(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "gzip": - files, err = p.cmpGZIP(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "bgzf": - files, err = p.cmpBGZF(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "lz4": - files, err = p.cmpLZ4(files, p.cfg.OutputPath) - metadata = p.fillMetadata(metadata, files) - case "e2fs": - files, err = p.cmpE2FS(files, filepath.Join(p.cfg.OutputPath, p.cfg.OutputFile)) - metadata = p.fillMetadata(metadata, files) - } - if err != nil { - return nil, false, fmt.Errorf("Failed to compress: %s", err) - } - } - - if p.cfg.Metadata { - fp, err := os.Create(metafile) - if err != nil { - return nil, false, err - } - if buf, err := yaml.Marshal(metadata); err != nil { - fp.Close() - return nil, false, err - } else { - if _, err = fp.Write(buf); err != nil { - fp.Close() - return nil, false, err - } - fp.Close() - } - } - - newartifact.f = append(newartifact.f, files...) 
- if p.cfg.Metadata { - newartifact.f = append(newartifact.f, metafile) - } - - return newartifact, p.cfg.KeepInputArtifact, nil -} - -func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) - if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed creating file for compressed archive: %s", self.config.OutputPath) } defer fw.Close() - tw := tar.NewWriter(fw) - defer tw.Close() + gw := gzip.NewWriter(fw) + defer gw.Close() - for _, name := range src { - fi, err := os.Stat(name) + // Iterate through all of the artifact's files and put them into the + // compressed archive using the tar/gzip writers. + for _, path := range artifact.Files() { + fi, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed stating file: %s", path) } - target, _ := os.Readlink(name) + target, _ := os.Readlink(path) header, err := tar.FileInfoHeader(fi, target) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed creating archive header: %s", path) } - if err = tw.WriteHeader(header); err != nil { - return nil, fmt.Errorf("tar error: %s", err) + tw := tar.NewWriter(gw) + defer tw.Close() + + // Write the header first to the archive. This takes partial data + // from the FileInfo that is grabbed by running the stat command. + if err := tw.WriteHeader(header); err != nil { + return nil, false, fmt.Errorf( + "Failed writing archive header: %s", path) } - fr, err := os.Open(name) + // Open the target file for archiving and compressing. 
+ fr, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed opening file '%s' to write compressed archive.", path) } + defer fr.Close() if _, err = io.Copy(tw, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("tar error: %s", err) + return nil, false, fmt.Errorf( + "Failed copying file to archive: %s", path) } - fr.Close() } - return []string{dst}, nil -} - -func (p *PostProcessor) cmpGZIP(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("gzip error: %s", err) - } - cw, err := gzip.NewWriterLevel(fw, p.cfg.Compression) - if err != nil { - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("gzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpPGZIP(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw, err := pgzip.NewWriterLevel(fw, p.cfg.Compression) - if err != nil { - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("pgzip error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - 
-func (p *PostProcessor) cmpLZ4(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw := lz4.NewWriter(fw) - if err != nil { - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - if p.cfg.Compression > flate.DefaultCompression { - cw.Header.HighCompression = true - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("lz4 error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpBGZF(src []string, dst string) ([]string, error) { - var res []string - for _, name := range src { - filename := filepath.Join(dst, filepath.Base(name)) - fw, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - - cw, err := bgzf.NewWriterLevel(fw, p.cfg.Compression, runtime.NumCPU()) - if err != nil { - return nil, fmt.Errorf("bgzf error: %s", err) - } - fr, err := os.Open(name) - if err != nil { - cw.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - if _, err = io.Copy(cw, fr); err != nil { - cw.Close() - fr.Close() - fw.Close() - return nil, fmt.Errorf("bgzf error: %s", err) - } - cw.Close() - fr.Close() - fw.Close() - res = append(res, filename) - } - return res, nil -} - -func (p *PostProcessor) cmpE2FS(src []string, dst string) ([]string, error) { - panic("not implemented") -} - -func (p *PostProcessor) cmpZIP(src []string, dst string) ([]string, error) { - fw, err := os.Create(dst) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - defer fw.Close() - - zw := zip.NewWriter(fw) - defer zw.Close() - - for _, name := range src { 
- header, err := zw.Create(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - fr, err := os.Open(name) - if err != nil { - return nil, fmt.Errorf("zip error: %s", err) - } - - if _, err = io.Copy(header, fr); err != nil { - fr.Close() - return nil, fmt.Errorf("zip error: %s", err) - } - fr.Close() - } - return []string{dst}, nil - + + return NewArtifact(artifact.BuilderId(), self.config.OutputPath), false, nil } From fe105107d25d909d07344c2a58dd524983b652d6 Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 12:11:11 -0700 Subject: [PATCH 49/50] Removed extra files -- will re-add later --- post-processor/compress/LICENSE | 21 --- post-processor/compress/benchmark.go | 197 --------------------------- 2 files changed, 218 deletions(-) delete mode 100644 post-processor/compress/LICENSE delete mode 100644 post-processor/compress/benchmark.go diff --git a/post-processor/compress/LICENSE b/post-processor/compress/LICENSE deleted file mode 100644 index 38bbf26f3..000000000 --- a/post-processor/compress/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Vasiliy Tolstov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go deleted file mode 100644 index ed4d68168..000000000 --- a/post-processor/compress/benchmark.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build ignore - -package main - -import ( - "compress/flate" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "testing" - - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" - "github.com/pierrec/lz4" -) - -type Compressor struct { - r *os.File - w *os.File - sr int64 - sw int64 -} - -func (c *Compressor) Close() error { - var err error - - fi, _ := c.w.Stat() - c.sw = fi.Size() - if err = c.w.Close(); err != nil { - return err - } - - fi, _ = c.r.Stat() - c.sr = fi.Size() - if err = c.r.Close(); err != nil { - return err - } - - return nil -} - -func NewCompressor(src, dst string) (*Compressor, error) { - r, err := os.Open(src) - if err != nil { - return nil, err - } - - w, err := os.Create(dst) - if err != nil { - r.Close() - return nil, err - } - - c := &Compressor{r: r, w: w} - return c, nil -} - -func main() { - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var resw testing.BenchmarkResult - var resr testing.BenchmarkResult - - c, err := NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkGZIPReader) - c.Close() - fmt.Printf("gzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkBGZFWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkBGZFReader) - c.Close() - 
fmt.Printf("bgzf:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkPGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkPGZIPReader) - c.Close() - fmt.Printf("pgzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkLZ4Writer) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkLZ4Reader) - c.Close() - fmt.Printf("lz4:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - -} - -func (c *Compressor) BenchmarkGZIPWriter(b *testing.B) { - cw, _ := gzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkGZIPReader(b *testing.B) { - cr, _ := gzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkBGZFWriter(b *testing.B) { - cw, _ := bgzf.NewWriterLevel(c.w, flate.BestSpeed, runtime.NumCPU()) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - c.w.Sync() -} - -func (c *Compressor) BenchmarkBGZFReader(b *testing.B) { - cr, _ := bgzf.NewReader(c.w, 0) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) { - cw, _ := pgzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkPGZIPReader(b *testing.B) { - cr, _ := pgzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - 
-func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { - cw := lz4.NewWriter(c.w) - // cw.Header.HighCompression = true - cw.Header.NoChecksum = true - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { - cr := lz4.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} From ddbc145d29f8e312057cb1221e49ff70172ff77c Mon Sep 17 00:00:00 2001 From: Chris Bednarski Date: Tue, 16 Jun 2015 16:31:09 -0700 Subject: [PATCH 50/50] Implemented acceptance test for compress --- .../compress/post-processor_test.go | 103 +++++++++++++++--- 1 file changed, 88 insertions(+), 15 deletions(-) diff --git a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 5f4d6b9ca..12faeabed 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,22 +1,95 @@ package compress -// import ( -// "testing" -// -// builderT "github.com/mitchellh/packer/helper/builder/testing" -// ) -// -// func TestBuilderTagsAcc_basic(t *testing.T) { -// builderT.Test(t, builderT.TestCase{ -// Builder: &Builder{}, -// Template: simpleTestCase, -// Check: checkTags(), -// }) -// } +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/mitchellh/packer/builder/file" + env "github.com/mitchellh/packer/helper/builder/testing" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" +) + +func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { + // Create fake UI and Cache + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + // Create config for file builder + const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` + tpl, err := template.Parse(strings.NewReader(fileConfig)) + if err != nil { + return nil, nil, 
fmt.Errorf("Unable to parse setup configuration: %s", err) + } + + // Prepare the file builder + builder := file.Builder{} + warnings, err := builder.Prepare(tpl.Builders["file"].Config) + if len(warnings) > 0 { + for _, warn := range warnings { + return nil, nil, fmt.Errorf("Configuration warning: %s", warn) + } + } + if err != nil { + return nil, nil, fmt.Errorf("Invalid configuration: %s", err) + } + + // Run the file builder + artifact, err := builder.Run(ui, nil, cache) + if err != nil { + return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) + } + + return ui, artifact, err +} + +func TestSimpleCompress(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(simpleTestCase)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to compress artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + fi, err := os.Stat("package.tar.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } + if fi.IsDir() { + t.Error("Archive should not be a directory") + } +} const simpleTestCase = ` { - "type": "compress", - "output": "foo.tar.gz" + "post-processors": [ + { + "type": "compress", + "output": "package.tar.gz" + } + ] } `