diff --git a/CHANGELOG.md b/CHANGELOG.md index ac36c6fe5..38718e928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,8 @@ FEATURES: to allow access to remote servers such as private git repos. [GH-1066] * **Docker builder supports SSH**: The Docker builder now supports containers with SSH, just set `communicator` to "ssh" [GH-2244] + * **File provisioner can download**: The file provisioner can now download + files from the machine being built. [GH-1909] * **New config function: `build_name`**: The name of the currently running build. [GH-2232] * **New config function: `build_type`**: The type of the currently running @@ -39,6 +41,7 @@ IMPROVEMENTS: * builder/amazon: Add `force_deregister` option for automatic AMI deregistration [GH-2221] * builder/amazon: Now applies tags to EBS snapshots [GH-2212] + * builder/amazon: Support custom keypairs [GH-1837] * builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829] * builder/digitalocean: User data support [GH-2113] * builder/parallels: Support Parallels Desktop 11 [GH-2199] @@ -55,12 +58,16 @@ IMPROVEMENTS: automatic port forward for SSH and to use the guest port directly. [GH-1078] * builder/virtualbox: Added SCSI support * builder/vmware: Support for additional disks [GH-1382] + * builder/vmware: Can now customize the template used for adding disks [GH-2254] * command/fix: After fixing, the template is validated [GH-2228] * command/push: Add `-name` flag for specifying name from CLI [GH-2042] * command/push: Push configuration in templates supports variables [GH-1861] * post-processor/docker-save: Can be chained [GH-2179] * post-processor/docker-tag: Support `force` option [GH-2055] * post-processor/docker-tag: Can be chained [GH-2179] + * provisioner/puppet-masterless: `working_directory` option [GH-1831] + * provisioner/puppet-masterless: `packer_build_name` and + `packer_builder_type` are default facts. [GH-1878] BUG FIXES: @@ -121,11 +128,14 @@ BUG FIXES: * post-processor/atlas: Fix index out of range panic [GH-1959] * post-processor/vagrant-cloud: Fixed failing on response * post-processor/vagrant-cloud: Don't delete version on error [GH-2014] + * provisioner/chef-client: Fix permissions issues on default dir [GH-2255] + * provisioner/chef-client: Node cleanup works now. [GH-2257] * provisioner/puppet-masterless: Allow manifest_file to be a directory * provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call * provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708] * provisioner/shell: inline commands failing will fail the provisioner [GH-2069] * provisioner/shell: single quotes in env vars are escaped [GH-2229] + * provisioner/shell: Temporary file is deleted after run [GH-2259] ## 0.7.5 (December 9, 2014) diff --git a/Makefile b/Makefile index 0574cbb5c..884d6bbf2 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,13 @@ testrace: go test -race $(TEST) $(TESTARGS) updatedeps: - go get -d -v -p 2 ./... + go get -u github.com/mitchellh/gox + go get -u golang.org/x/tools/cmd/stringer + go list ./... \ + | xargs go list -f '{{join .Deps "\n"}}' \ + | grep -v github.com/mitchellh/packer \ + | sort -u \ + | xargs go get -f -u -v vet: @go tool vet 2>/dev/null ; if [ $$? 
-eq 3 ]; then \ diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 5589a5578..cc981596d 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -32,12 +32,14 @@ type RunConfig struct { VpcId string `mapstructure:"vpc_id"` // Communicator settings - Comm communicator.Config `mapstructure:",squash"` - SSHPrivateIp bool `mapstructure:"ssh_private_ip"` + Comm communicator.Config `mapstructure:",squash"` + SSHKeyPairName string `mapstructure:"ssh_keypair_name"` + SSHPrivateIp bool `mapstructure:"ssh_private_ip"` } func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { - if c.TemporaryKeyPairName == "" { + // If we are not given an explicit keypair name, create a temporary one + if c.SSHKeyPairName == "" { c.TemporaryKeyPairName = fmt.Sprintf( "packer %s", uuid.TimeOrderedUUID()) } diff --git a/builder/amazon/common/step_key_pair.go b/builder/amazon/common/step_key_pair.go index e15e0c218..5bed27b10 100644 --- a/builder/amazon/common/step_key_pair.go +++ b/builder/amazon/common/step_key_pair.go @@ -12,17 +12,20 @@ import ( ) type StepKeyPair struct { - Debug bool - DebugKeyPath string - KeyPairName string - PrivateKeyFile string + Debug bool + DebugKeyPath string + TemporaryKeyPairName string + KeyPairName string + PrivateKeyFile string keyName string } func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { if s.PrivateKeyFile != "" { - s.keyName = "" + if s.KeyPairName != "" { + s.keyName = s.KeyPairName // use the keypair name given in the config + } privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile) if err != nil { @@ -30,7 +33,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - state.Put("keyPair", "") + state.Put("keyPair", s.keyName) state.Put("privateKey", string(privateKeyBytes)) return multistep.ActionContinue @@ -39,7 +42,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { ec2conn := state.Get("ec2").(*ec2.EC2) ui := state.Get("ui").(packer.Ui) - ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName)) + ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName)) - keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: &s.KeyPairName}) + keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: &s.TemporaryKeyPairName}) if err != nil { state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err)) @@ -47,7 +50,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { } // Set the keyname so we know to delete it later - s.keyName = s.KeyPairName + s.keyName = s.TemporaryKeyPairName // Set some state data for use in future steps state.Put("keyPair", s.keyName) @@ -84,7 +87,9 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { func (s *StepKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return - if s.keyName == "" { + // If we used an SSH private key file, do not go about deleting + // keypairs + if s.PrivateKeyFile != "" { return } diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go index ec330ebc4..f88db5efc 100644 --- a/builder/amazon/common/step_run_source_instance.go +++ b/builder/amazon/common/step_run_source_instance.go @@ -245,7 +245,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi } latestInstance, err := WaitForState(&stateChange) if err != nil { - err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", 
*s.instance.InstanceID, err) + err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go index e6fbc8c27..c13947ac4 100644 --- a/builder/amazon/ebs/builder.go +++ b/builder/amazon/ebs/builder.go @@ -88,10 +88,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, - PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, }, &awscommon.StepSecurityGroup{ SecurityGroupIds: b.config.SecurityGroupIds, diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go index 0243d4a77..5e8068718 100644 --- a/builder/amazon/instance/builder.go +++ b/builder/amazon/instance/builder.go @@ -177,10 +177,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe EnhancedNetworking: b.config.AMIEnhancedNetworking, }, &awscommon.StepKeyPair{ - Debug: b.config.PackerDebug, - DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), - KeyPairName: b.config.TemporaryKeyPairName, - PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, + Debug: b.config.PackerDebug, + DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), + KeyPairName: b.config.SSHKeyPairName, + PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey, + TemporaryKeyPairName: b.config.TemporaryKeyPairName, }, &awscommon.StepSecurityGroup{ CommConfig: &b.config.RunConfig.Comm, diff --git a/builder/file/artifact.go b/builder/file/artifact.go new file mode 100644 index 000000000..35bf06e6c --- /dev/null +++ b/builder/file/artifact.go @@ -0,0 +1,36 @@ +package file + +import ( + "fmt" + "log" + "os" +) + +type FileArtifact struct { + filename string +} + +func (*FileArtifact) BuilderId() string { + return BuilderId +} + +func (a *FileArtifact) Files() []string { + return []string{a.filename} +} + +func (a *FileArtifact) Id() string { + return "File" +} + +func (a *FileArtifact) String() string { + return fmt.Sprintf("Stored file: %s", a.filename) +} + +func (a *FileArtifact) State(name string) interface{} { + return nil +} + +func (a *FileArtifact) Destroy() error { + log.Printf("Deleting %s", a.filename) + return os.Remove(a.filename) +} diff --git a/builder/file/artifact_test.go b/builder/file/artifact_test.go new file mode 100644 index 000000000..0aa77894b --- /dev/null +++ b/builder/file/artifact_test.go @@ -0,0 +1,11 @@ +package file + +import ( + "testing" + + "github.com/mitchellh/packer/packer" +) + +func TestFileArtifact(t *testing.T) { + var _ packer.Artifact = new(FileArtifact) +} diff --git a/builder/file/builder.go b/builder/file/builder.go new file mode 100644 index 000000000..9a2c2cc7f --- /dev/null +++ b/builder/file/builder.go @@ -0,0 +1,77 @@ +package file + +/* +The File builder creates an artifact from a file. Because it does not require +any virtualization or network resources, it's very fast and useful for testing. 
+*/ + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mitchellh/multistep" + "github.com/mitchellh/packer/packer" +) + +const BuilderId = "packer.file" + +type Builder struct { + config *Config + runner multistep.Runner +} + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + c, warnings, errs := NewConfig(raws...) + if errs != nil { + return warnings, errs + } + b.config = c + + return warnings, nil +} + +// Run copies the source file or writes the inline content to the target and returns it as the artifact. +func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + artifact := new(FileArtifact) + + if b.config.Source != "" { + source, err := os.Open(b.config.Source) + if err != nil { + return nil, err + } + defer source.Close() + + // Create will truncate an existing file + target, err := os.Create(b.config.Target) + if err != nil { + return nil, err + } + defer target.Close() + + ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name())) + bytes, err := io.Copy(target, source) + if err != nil { + return nil, err + } + ui.Say(fmt.Sprintf("Copied %d bytes", bytes)) + artifact.filename = target.Name() + } else { + // We're going to write Content; if it's empty we'll just create an + // empty file. + err := ioutil.WriteFile(b.config.Target, []byte(b.config.Content), 0600) + if err != nil { + return nil, err + } + artifact.filename = b.config.Target + } + + return artifact, nil +} + +// Cancel cancels a possibly running Builder. This should block until +// the builder actually cancels and cleans up after itself. +func (b *Builder) Cancel() { + b.runner.Cancel() +} diff --git a/builder/file/builder_test.go b/builder/file/builder_test.go new file mode 100644 index 000000000..3ce9e77ae --- /dev/null +++ b/builder/file/builder_test.go @@ -0,0 +1,78 @@ +package file + +import ( + "fmt" + "io/ioutil" + "testing" + + builderT "github.com/mitchellh/packer/helper/builder/testing" + "github.com/mitchellh/packer/packer" +) + +func TestBuilder_implBuilder(t *testing.T) { + var _ packer.Builder = new(Builder) +} + +func TestBuilderFileAcc_content(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileContentTest, + Check: checkContent, + }) +} + +func TestBuilderFileAcc_copy(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + Builder: &Builder{}, + Template: fileCopyTest, + Check: checkCopy, + }) +} + +func checkContent(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("contentTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "hello world!" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +func checkCopy(artifacts []packer.Artifact) error { + content, err := ioutil.ReadFile("copyTest.txt") + if err != nil { + return err + } + contentString := string(content) + if contentString != "Hello world.\n" { + return fmt.Errorf("Unexpected file contents: %s", contentString) + } + return nil +} + +const fileContentTest = ` +{ + "builders": [ + { + "type":"test", + "target":"contentTest.txt", + "content":"hello world!" 
+ } + ] } ` + +const fileCopyTest = ` +{ + "builders": [ + { + "type":"test", + "target":"copyTest.txt", + "source":"test-fixtures/artifact.txt" + } + ] +} +` diff --git a/builder/file/config.go b/builder/file/config.go new file mode 100644 index 000000000..6702e6894 --- /dev/null +++ b/builder/file/config.go @@ -0,0 +1,56 @@ +package file + +import ( + "fmt" + + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +var ErrTargetRequired = fmt.Errorf("target required") +var ErrContentSourceConflict = fmt.Errorf("Cannot specify source file AND content") + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Source string `mapstructure:"source"` + Target string `mapstructure:"target"` + Content string `mapstructure:"content"` +} + +func NewConfig(raws ...interface{}) (*Config, []string, error) { + c := new(Config) + warnings := []string{} + + err := config.Decode(c, &config.DecodeOpts{ + Interpolate: true, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) + if err != nil { + return nil, warnings, err + } + + var errs *packer.MultiError + + if c.Target == "" { + errs = packer.MultiErrorAppend(errs, ErrTargetRequired) + } + + if c.Content == "" && c.Source == "" { + warnings = append(warnings, "Both source file and contents are blank; target will have no content") + } + + if c.Content != "" && c.Source != "" { + errs = packer.MultiErrorAppend(errs, ErrContentSourceConflict) + } + + if errs != nil && len(errs.Errors) > 0 { + return nil, warnings, errs + } + + return c, warnings, nil +} diff --git a/builder/file/config_test.go b/builder/file/config_test.go new file mode 100644 index 000000000..9d8f346fc --- /dev/null +++ b/builder/file/config_test.go @@ -0,0 +1,45 @@ +package file + +import ( + "strings" + "testing" +) + +func testConfig() map[string]interface{} { + return map[string]interface{}{ + "source": "src.txt", + "target": "dst.txt", + "content": "Hello, world!", + } +} + +func TestContentSourceConflict(t *testing.T) { + raw := testConfig() + + _, _, errs := NewConfig(raw) + if !strings.Contains(errs.Error(), ErrContentSourceConflict.Error()) { + t.Errorf("Expected config error: %s", ErrContentSourceConflict.Error()) + } +} + +func TestNoTarget(t *testing.T) { + raw := testConfig() + + delete(raw, "target") + _, _, errs := NewConfig(raw) + if errs == nil { + t.Errorf("Expected config error: %s", ErrTargetRequired.Error()) + } +} + +func TestNoContent(t *testing.T) { + raw := testConfig() + + delete(raw, "content") + delete(raw, "source") + _, warns, _ := NewConfig(raw) + + if len(warns) == 0 { + t.Error("Expected config warning without any content") + } +} diff --git a/builder/file/test-fixtures/artifact.txt b/builder/file/test-fixtures/artifact.txt new file mode 100644 index 000000000..18249f335 --- /dev/null +++ b/builder/file/test-fixtures/artifact.txt @@ -0,0 +1 @@ +Hello world. 
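To make the new `file` builder's validation rules concrete, here is a minimal Go sketch that drives `NewConfig` directly, the same way the tests above do. It assumes only the import path shown in this diff; the literal config values are illustrative, not taken from any real template:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/packer/builder/file"
)

func main() {
	// A valid config: a target plus inline content. At most a warning
	// list comes back; the decoded config itself is discarded here.
	if _, warns, err := file.NewConfig(map[string]interface{}{
		"target":  "hello.txt",
		"content": "Hello world!",
	}); err != nil {
		fmt.Println("unexpected error:", err)
	} else if len(warns) > 0 {
		fmt.Println("warnings:", warns)
	}

	// "source" and "content" are mutually exclusive.
	if _, _, err := file.NewConfig(map[string]interface{}{
		"target":  "conflict.txt",
		"source":  "src.txt",
		"content": "Hello world!",
	}); err == nil {
		fmt.Println("expected ErrContentSourceConflict, got nil")
	}

	// "target" is always required.
	if _, _, err := file.NewConfig(map[string]interface{}{
		"content": "Hello world!",
	}); err == nil {
		fmt.Println("expected ErrTargetRequired, got nil")
	}
}
```

Note that leaving both `source` and `content` blank is deliberately not an error: as the config code above shows, that case only produces a warning, so the builder can still emit an empty target file.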
diff --git a/builder/vmware/common/step_upload_tools.go b/builder/vmware/common/step_upload_tools.go index aa7dd08e7..3f7214965 100644 --- a/builder/vmware/common/step_upload_tools.go +++ b/builder/vmware/common/step_upload_tools.go @@ -23,6 +23,10 @@ type StepUploadTools struct { func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) + if c.ToolsUploadFlavor == "" { + return multistep.ActionContinue + } + if c.RemoteType == "esx5" { if err := driver.ToolsInstall(); err != nil { state.Put("error", fmt.Errorf("Couldn't mount VMware tools ISO. Please check the 'guest_os_type' in your template.json.")) @@ -30,10 +34,6 @@ func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionContinue } - if c.ToolsUploadFlavor == "" { - return multistep.ActionContinue - } - comm := state.Get("communicator").(packer.Communicator) tools_source := state.Get("tools_upload_source").(string) ui := state.Get("ui").(packer.Ui) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go old mode 100644 new mode 100755 index 38ba3a4a1..fa8deb983 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -36,20 +36,21 @@ type Config struct { vmwcommon.ToolsConfig `mapstructure:",squash"` vmwcommon.VMXConfig `mapstructure:",squash"` - AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` - DiskName string `mapstructure:"vmdk_name"` - DiskSize uint `mapstructure:"disk_size"` - DiskTypeId string `mapstructure:"disk_type_id"` - FloppyFiles []string `mapstructure:"floppy_files"` - GuestOSType string `mapstructure:"guest_os_type"` - ISOChecksum string `mapstructure:"iso_checksum"` - ISOChecksumType string `mapstructure:"iso_checksum_type"` - ISOUrls []string `mapstructure:"iso_urls"` - Version string `mapstructure:"version"` - VMName string `mapstructure:"vm_name"` - BootCommand []string `mapstructure:"boot_command"` - SkipCompaction bool `mapstructure:"skip_compaction"` - VMXTemplatePath string `mapstructure:"vmx_template_path"` + AdditionalDiskSize []uint `mapstructure:"disk_additional_size"` + DiskName string `mapstructure:"vmdk_name"` + DiskSize uint `mapstructure:"disk_size"` + DiskTypeId string `mapstructure:"disk_type_id"` + FloppyFiles []string `mapstructure:"floppy_files"` + GuestOSType string `mapstructure:"guest_os_type"` + ISOChecksum string `mapstructure:"iso_checksum"` + ISOChecksumType string `mapstructure:"iso_checksum_type"` + ISOUrls []string `mapstructure:"iso_urls"` + Version string `mapstructure:"version"` + VMName string `mapstructure:"vm_name"` + BootCommand []string `mapstructure:"boot_command"` + SkipCompaction bool `mapstructure:"skip_compaction"` + VMXTemplatePath string `mapstructure:"vmx_template_path"` + VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"` RemoteType string `mapstructure:"remote_type"` RemoteDatastore string `mapstructure:"remote_datastore"` diff --git a/builder/vmware/iso/step_create_vmx.go b/builder/vmware/iso/step_create_vmx.go old mode 100644 new mode 100755 index 69cb3f261..272721893 --- a/builder/vmware/iso/step_create_vmx.go +++ b/builder/vmware/iso/step_create_vmx.go @@ -76,7 +76,29 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { DiskName: config.DiskName, } - diskTemplate, err := interpolate.Render(DefaultAdditionalDiskTemplate, &ctx) + diskTemplate := DefaultAdditionalDiskTemplate + if config.VMXDiskTemplatePath != "" { + f, err := os.Open(config.VMXDiskTemplatePath) + if err 
!= nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + defer f.Close() + + rawBytes, err := ioutil.ReadAll(f) + if err != nil { + err := fmt.Errorf("Error reading VMX disk template: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + diskTemplate = string(rawBytes) + } + + diskContents, err := interpolate.Render(diskTemplate, &ctx) if err != nil { err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err) state.Put("error", err) @@ -84,7 +106,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - vmxTemplate += diskTemplate + vmxTemplate += diskContents } } diff --git a/common/step_provision.go b/common/step_provision.go index ae06f1b0c..f40cfd896 100644 --- a/common/step_provision.go +++ b/common/step_provision.go @@ -23,9 +23,11 @@ type StepProvision struct { func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction { comm := s.Comm if comm == nil { - comm = state.Get("communicator").(packer.Communicator) + raw, ok := state.Get("communicator").(packer.Communicator) + if ok { + comm = raw + } } - hook := state.Get("hook").(packer.Hook) ui := state.Get("ui").(packer.Ui) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index 8fd9ba91e..2cc299b30 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -14,6 +14,7 @@ import ( "net" "os" "path/filepath" + "strconv" "sync" ) @@ -171,8 +172,57 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error { return c.scpSession("scp -rvt "+dst, scpFunc) } -func (c *comm) Download(string, io.Writer) error { - panic("not implemented yet") +func (c *comm) Download(path string, output io.Writer) error { + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + fmt.Fprint(w, "\x00") + + // read file info + fi, err := stdoutR.ReadString('\n') + if err != nil { + return err + } + + if len(fi) == 0 { + return fmt.Errorf("empty response from server") + } + + switch fi[0] { + case '\x01', '\x02': + return fmt.Errorf("%s", fi[1:]) + case 'C': // regular file; mode and size are parsed below + case 'D': + return fmt.Errorf("remote file is directory") + default: + return fmt.Errorf("unexpected server response (%x)", fi[0]) + } + + var mode string + var size int64 + + n, err := fmt.Sscanf(fi, "%6s %d ", &mode, &size) + if err != nil || n != 2 { + return fmt.Errorf("can't parse server response (%s)", fi) + } + if size < 0 { + return fmt.Errorf("negative file size") + } + + fmt.Fprint(w, "\x00") + + if _, err := io.CopyN(output, stdoutR, size); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + + if err := checkSCPStatus(stdoutR); err != nil { + return err + } + + return nil + } + + return c.scpSession("scp -vf "+strconv.Quote(path), scpFunc) } func (c *comm) newSession() (session *ssh.Session, err error) { diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 82686e2a7..2b53ac62c 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -113,7 +113,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error } func (c *Communicator) Download(src string, dst io.Writer) error { - panic("download not implemented") + return fmt.Errorf("WinRM doesn't support download.") } func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { diff --git 
a/packer/build_test.go b/packer/build_test.go index b183fb95a..e29318972 100644 --- a/packer/build_test.go +++ b/packer/build_test.go @@ -202,7 +202,7 @@ func TestBuild_Run(t *testing.T) { } // Verify provisioners run - dispatchHook.Run(HookProvision, nil, nil, 42) + dispatchHook.Run(HookProvision, nil, new(MockCommunicator), 42) prov := build.provisioners[0].provisioner.(*MockProvisioner) if !prov.ProvCalled { t.Fatal("should be called") diff --git a/packer/builder_mock.go b/packer/builder_mock.go index 9cb016963..d8fd98e13 100644 --- a/packer/builder_mock.go +++ b/packer/builder_mock.go @@ -43,7 +43,7 @@ func (tb *MockBuilder) Run(ui Ui, h Hook, c Cache) (Artifact, error) { } if h != nil { - if err := h.Run(HookProvision, ui, nil, nil); err != nil { + if err := h.Run(HookProvision, ui, new(MockCommunicator), nil); err != nil { return nil, err } } diff --git a/packer/provisioner.go b/packer/provisioner.go index d28d1371a..f4f3fce11 100644 --- a/packer/provisioner.go +++ b/packer/provisioner.go @@ -38,6 +38,18 @@ type ProvisionHook struct { // Runs the provisioners in order. func (h *ProvisionHook) Run(name string, ui Ui, comm Communicator, data interface{}) error { + // Shortcut + if len(h.Provisioners) == 0 { + return nil + } + + if comm == nil { + return fmt.Errorf( + "No communicator found for provisioners! This is usually because the\n" + + "`communicator` config was set to \"none\". If you have any provisioners\n" + + "then a communicator is required. Please fix this to continue.") + } + defer func() { h.lock.Lock() defer h.lock.Unlock() diff --git a/packer/provisioner_test.go b/packer/provisioner_test.go index 5eeebb4a3..7251d6f05 100644 --- a/packer/provisioner_test.go +++ b/packer/provisioner_test.go @@ -19,7 +19,7 @@ func TestProvisionHook(t *testing.T) { pB := &MockProvisioner{} ui := testUi() - var comm Communicator = nil + var comm Communicator = new(MockCommunicator) var data interface{} = nil hook := &ProvisionHook{ @@ -37,6 +37,24 @@ func TestProvisionHook(t *testing.T) { } } +func TestProvisionHook_nilComm(t *testing.T) { + pA := &MockProvisioner{} + pB := &MockProvisioner{} + + ui := testUi() + var comm Communicator = nil + var data interface{} = nil + + hook := &ProvisionHook{ + Provisioners: []Provisioner{pA, pB}, + } + + err := hook.Run("foo", ui, comm, data) + if err == nil { + t.Fatal("should error") + } +} + func TestProvisionHook_cancel(t *testing.T) { var lock sync.Mutex order := make([]string, 0, 2) @@ -59,7 +77,7 @@ func TestProvisionHook_cancel(t *testing.T) { finished := make(chan struct{}) go func() { - hook.Run("foo", nil, nil, nil) + hook.Run("foo", nil, new(MockCommunicator), nil) close(finished) }() @@ -74,7 +92,7 @@ func TestProvisionHook_cancel(t *testing.T) { <-finished // Verify order - if order[0] != "cancel" || order[1] != "prov" { + if len(order) != 2 || order[0] != "cancel" || order[1] != "prov" { t.Fatalf("bad: %#v", order) } } diff --git a/plugin/builder-file/main.go b/plugin/builder-file/main.go new file mode 100644 index 000000000..54bc4f437 --- /dev/null +++ b/plugin/builder-file/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/mitchellh/packer/builder/file" + "github.com/mitchellh/packer/packer/plugin" +) + +func main() { + server, err := plugin.Server() + if err != nil { + panic(err) + } + server.RegisterBuilder(new(file.Builder)) + server.Serve() +} diff --git a/post-processor/compress/LICENSE b/post-processor/compress/LICENSE deleted file mode 100644 index 38bbf26f3..000000000 --- a/post-processor/compress/LICENSE +++ 
/dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Vasiliy Tolstov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/post-processor/compress/benchmark.go b/post-processor/compress/benchmark.go deleted file mode 100644 index ed4d68168..000000000 --- a/post-processor/compress/benchmark.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build ignore - -package main - -import ( - "compress/flate" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "testing" - - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" - "github.com/pierrec/lz4" -) - -type Compressor struct { - r *os.File - w *os.File - sr int64 - sw int64 -} - -func (c *Compressor) Close() error { - var err error - - fi, _ := c.w.Stat() - c.sw = fi.Size() - if err = c.w.Close(); err != nil { - return err - } - - fi, _ = c.r.Stat() - c.sr = fi.Size() - if err = c.r.Close(); err != nil { - return err - } - - return nil -} - -func NewCompressor(src, dst string) (*Compressor, error) { - r, err := os.Open(src) - if err != nil { - return nil, err - } - - w, err := os.Create(dst) - if err != nil { - r.Close() - return nil, err - } - - c := &Compressor{r: r, w: w} - return c, nil -} - -func main() { - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var resw testing.BenchmarkResult - var resr testing.BenchmarkResult - - c, err := NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkGZIPReader) - c.Close() - fmt.Printf("gzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkBGZFWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkBGZFReader) - c.Close() - fmt.Printf("bgzf:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkPGZIPWriter) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkPGZIPReader) - c.Close() - fmt.Printf("pgzip:\twriter %s\treader %s\tsize %d\n", resw.T.String(), resr.T.String(), c.sw) - - c, err = NewCompressor("/tmp/image.r", "/tmp/image.w") - if err != nil { - panic(err) - } - resw = testing.Benchmark(c.BenchmarkLZ4Writer) - c.w.Seek(0, 0) - resr = testing.Benchmark(c.BenchmarkLZ4Reader) - c.Close() - fmt.Printf("lz4:\twriter %s\treader %s\tsize %d\n", 
resw.T.String(), resr.T.String(), c.sw) - -} - -func (c *Compressor) BenchmarkGZIPWriter(b *testing.B) { - cw, _ := gzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkGZIPReader(b *testing.B) { - cr, _ := gzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkBGZFWriter(b *testing.B) { - cw, _ := bgzf.NewWriterLevel(c.w, flate.BestSpeed, runtime.NumCPU()) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - c.w.Sync() -} - -func (c *Compressor) BenchmarkBGZFReader(b *testing.B) { - cr, _ := bgzf.NewReader(c.w, 0) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) { - cw, _ := pgzip.NewWriterLevel(c.w, flate.BestSpeed) - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkPGZIPReader(b *testing.B) { - cr, _ := pgzip.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} - -func (c *Compressor) BenchmarkLZ4Writer(b *testing.B) { - cw := lz4.NewWriter(c.w) - // cw.Header.HighCompression = true - cw.Header.NoChecksum = true - b.ResetTimer() - - _, err := io.Copy(cw, c.r) - if err != nil { - b.Fatal(err) - } - cw.Close() - c.w.Sync() -} - -func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { - cr := lz4.NewReader(c.w) - b.ResetTimer() - - _, err := io.Copy(ioutil.Discard, cr) - if err != nil { - b.Fatal(err) - } -} diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index 340a75dd4..c2f608685 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -2,47 +2,23 @@ package compress import ( "archive/tar" - "archive/zip" - "compress/flate" "compress/gzip" "fmt" "io" "os" - "path/filepath" - "runtime" - "strings" - "time" - "github.com/biogo/hts/bgzf" - "github.com/klauspost/pgzip" "github.com/mitchellh/packer/common" "github.com/mitchellh/packer/helper/config" "github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/template/interpolate" - "github.com/pierrec/lz4" - "gopkg.in/yaml.v2" ) -type Metadata map[string]Metaitem - -type Metaitem struct { - CompSize int64 `yaml:"compsize"` - OrigSize int64 `yaml:"origsize"` - CompType string `yaml:"comptype"` - CompDate string `yaml:"compdate"` -} - type Config struct { common.PackerConfig `mapstructure:",squash"` - OutputPath string `mapstructure:"output"` - OutputFile string `mapstructure:"file"` - Compression int `mapstructure:"compression"` - Metadata bool `mapstructure:"metadata"` - NumCPU int `mapstructure:"numcpu"` - Format string `mapstructure:"format"` - KeepInputArtifact bool `mapstructure:"keep_input_artifact"` - ctx *interpolate.Context + OutputPath string `mapstructure:"output"` + + ctx interpolate.Context } type PostProcessor struct { @@ -229,7 +205,7 @@ func (p *PostProcessor) cmpTAR(src []string, dst string) ([]string, error) { return nil, fmt.Errorf("tar error on stat of %s: %s", name, err) } - target, _ := os.Readlink(name) + target, _ := os.Readlink(path) header, err := tar.FileInfoHeader(fi, target) if err != nil { return nil, fmt.Errorf("tar error reading info for %s: %s", name, err) diff --git 
a/post-processor/compress/post-processor_test.go b/post-processor/compress/post-processor_test.go index 92cbfc4b3..12faeabed 100644 --- a/post-processor/compress/post-processor_test.go +++ b/post-processor/compress/post-processor_test.go @@ -1,3 +1,95 @@ package compress -import () +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/mitchellh/packer/builder/file" + env "github.com/mitchellh/packer/helper/builder/testing" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template" +) + +func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { + // Create fake UI and Cache + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + // Create config for file builder + const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` + tpl, err := template.Parse(strings.NewReader(fileConfig)) + if err != nil { + return nil, nil, fmt.Errorf("Unable to parse setup configuration: %s", err) + } + + // Prepare the file builder + builder := file.Builder{} + warnings, err := builder.Prepare(tpl.Builders["file"].Config) + if len(warnings) > 0 { + for _, warn := range warnings { + return nil, nil, fmt.Errorf("Configuration warning: %s", warn) + } + } + if err != nil { + return nil, nil, fmt.Errorf("Invalid configuration: %s", err) + } + + // Run the file builder + artifact, err := builder.Run(ui, nil, cache) + if err != nil { + return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) + } + + return ui, artifact, err +} + +func TestSimpleCompress(t *testing.T) { + if os.Getenv(env.TestEnvVar) == "" { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) + } + + ui, artifact, err := setup(t) + if err != nil { + t.Fatalf("Error bootstrapping test: %s", err) + } + if artifact != nil { + defer artifact.Destroy() + } + + tpl, err := template.Parse(strings.NewReader(simpleTestCase)) + if err != nil { + t.Fatalf("Unable to parse test config: %s", err) + } + + compressor := PostProcessor{} + compressor.Configure(tpl.PostProcessors[0][0].Config) + artifactOut, _, err := compressor.PostProcess(ui, artifact) + if err != nil { + t.Fatalf("Failed to compress artifact: %s", err) + } + // Cleanup after the test completes + defer artifactOut.Destroy() + + // Verify things look good + fi, err := os.Stat("package.tar.gz") + if err != nil { + t.Errorf("Unable to read archive: %s", err) + } + if fi.IsDir() { + t.Error("Archive should not be a directory") + } +} + +const simpleTestCase = ` +{ + "post-processors": [ + { + "type": "compress", + "output": "package.tar.gz" + } + ] +} +` diff --git a/provisioner/chef-client/provisioner.go b/provisioner/chef-client/provisioner.go index ea41e954c..2d42d361d 100644 --- a/provisioner/chef-client/provisioner.go +++ b/provisioner/chef-client/provisioner.go @@ -9,7 +9,6 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "strings" @@ -37,6 +36,7 @@ type Config struct { SkipCleanNode bool `mapstructure:"skip_clean_node"` SkipInstall bool `mapstructure:"skip_install"` StagingDir string `mapstructure:"staging_directory"` + ClientKey string `mapstructure:"client_key"` ValidationKeyPath string `mapstructure:"validation_key_path"` ValidationClientName string `mapstructure:"validation_client_name"` @@ -50,6 +50,7 @@ type Provisioner struct { type ConfigTemplate struct { NodeName string ServerUrl string + ClientKey string ValidationKeyPath string ValidationClientName string ChefEnvironment string @@ -162,6 +163,10 @@ func (p *Provisioner) 
Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error creating staging directory: %s", err) } + if p.config.ClientKey == "" { + p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir) + } + if p.config.ValidationKeyPath != "" { remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir) if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil { @@ -170,7 +175,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } configPath, err := p.createConfig( - ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) + ui, comm, nodeName, serverUrl, p.config.ClientKey, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode) if err != nil { return fmt.Errorf("Error creating Chef config file: %s", err) } @@ -224,7 +229,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds return comm.UploadDir(dst, src, nil) } -func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { +func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) { ui.Message("Creating configuration file 'client.rb'") // Read the template @@ -248,6 +253,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN ctx.Data = &ConfigTemplate{ NodeName: nodeName, ServerUrl: serverUrl, + ClientKey: clientKey, ValidationKeyPath: remoteKeyPath, ValidationClientName: validationClientName, ChefEnvironment: chefEnvironment, @@ -303,16 +309,25 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri mkdirCmd = "sudo " + mkdirCmd } - cmd := &packer.RemoteCmd{ - Command: mkdirCmd, - } - + cmd := &packer.RemoteCmd{Command: mkdirCmd} if err := cmd.StartWithUi(comm, ui); err != nil { return err } - if cmd.ExitStatus != 0 { - return fmt.Errorf("Non-zero exit status.") + return fmt.Errorf("Non-zero exit status. See output above for more info.") + } + + // Chmod the directory to 0777 just so that we can access it as our user + mkdirCmd = fmt.Sprintf("chmod 0777 '%s'", dir) + if !p.config.PreventSudo { + mkdirCmd = "sudo " + mkdirCmd + } + cmd = &packer.RemoteCmd{Command: mkdirCmd} + if err := cmd.StartWithUi(comm, ui); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status. 
See output above for more info.") } return nil @@ -320,15 +335,9 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef node...") - app := fmt.Sprintf("knife node delete %s -y", node) - - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() - - ui.Message(fmt.Sprintf("%s", out)) - - if err != nil { - return err + args := []string{"node", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup node: %s", err) } return nil @@ -336,16 +345,38 @@ func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node str func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error { ui.Say("Cleaning up chef client...") - app := fmt.Sprintf("knife client delete %s -y", node) + args := []string{"client", "delete", node} + if err := p.knifeExec(ui, comm, node, args); err != nil { + return fmt.Errorf("Failed to cleanup client: %s", err) + } - cmd := exec.Command("sh", "-c", app) - out, err := cmd.Output() + return nil +} - ui.Message(fmt.Sprintf("%s", out)) +func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, args []string) error { + flags := []string{ + "-y", + "-s", fmt.Sprintf("'%s'", p.config.ServerUrl), + "-k", fmt.Sprintf("'%s'", p.config.ClientKey), + "-u", fmt.Sprintf("'%s'", node), + } - if err != nil { + cmdText := fmt.Sprintf( + "knife %s %s", strings.Join(args, " "), strings.Join(flags, " ")) + if !p.config.PreventSudo { + cmdText = "sudo " + cmdText + } + + cmd := &packer.RemoteCmd{Command: cmdText} + if err := cmd.StartWithUi(comm, ui); err != nil { return err } + if cmd.ExitStatus != 0 { + return fmt.Errorf( + "Non-zero exit status. See output above for more info.\n\n"+ + "Command: %s", + cmdText) + } return nil } @@ -524,6 +555,7 @@ var DefaultConfigTemplate = ` log_level :info log_location STDOUT chef_server_url "{{.ServerUrl}}" +client_key "{{.ClientKey}}" {{if ne .ValidationClientName ""}} validation_client_name "{{.ValidationClientName}}" {{else}} diff --git a/provisioner/file/provisioner.go b/provisioner/file/provisioner.go index ce359a407..9bc2a646c 100644 --- a/provisioner/file/provisioner.go +++ b/provisioner/file/provisioner.go @@ -20,6 +20,9 @@ type Config struct { // The remote path where the local file will be uploaded to. 
Destination string + // Direction of the transfer: "upload" (the default) or "download" + Direction string + ctx interpolate.Context } @@ -38,12 +41,25 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return err } + if p.config.Direction == "" { + p.config.Direction = "upload" + } + var errs *packer.MultiError - if _, err := os.Stat(p.config.Source); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) - } + if p.config.Direction != "download" && p.config.Direction != "upload" { + errs = packer.MultiErrorAppend(errs, + errors.New("Direction must be one of: download, upload.")) + } + + if p.config.Direction == "upload" { + if _, err := os.Stat(p.config.Source); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) + } + } + + if p.config.Destination == "" { errs = packer.MultiErrorAppend(errs, errors.New("Destination must be specified.")) @@ -57,6 +76,30 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { + if p.config.Direction == "download" { + return p.ProvisionDownload(ui, comm) + } else { + return p.ProvisionUpload(ui, comm) + } +} + +func (p *Provisioner) ProvisionDownload(ui packer.Ui, comm packer.Communicator) error { + ui.Say(fmt.Sprintf("Downloading %s => %s", p.config.Source, p.config.Destination)) + + f, err := os.OpenFile(p.config.Destination, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + + err = comm.Download(p.config.Source, f) + if err != nil { + ui.Error(fmt.Sprintf("Download failed: %s", err)) + } + return err +} + +func (p *Provisioner) ProvisionUpload(ui packer.Ui, comm packer.Communicator) error { ui.Say(fmt.Sprintf("Uploading %s => %s", p.config.Source, p.config.Destination)) info, err := os.Stat(p.config.Source) if err != nil { diff --git a/provisioner/puppet-masterless/provisioner.go b/provisioner/puppet-masterless/provisioner.go index eb364da58..2002cf359 100644 --- a/provisioner/puppet-masterless/provisioner.go +++ b/provisioner/puppet-masterless/provisioner.go @@ -44,6 +44,10 @@ type Config struct { // The directory where files will be uploaded. Packer requires write // permissions in this directory. StagingDir string `mapstructure:"staging_directory"` + + // The directory from which the command will be executed. + // Packer requires the directory to exist when running puppet. 
+ WorkingDir string `mapstructure:"working_directory"` } type Provisioner struct { @@ -51,6 +55,7 @@ type Provisioner struct { } type ExecuteTemplate struct { + WorkingDir string FacterVars string HieraConfigPath string ModulePath string @@ -74,7 +79,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { // Set some defaults if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + + p.config.ExecuteCommand = "cd {{.WorkingDir}} && " + + "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" + "puppet apply --verbose --modulepath='{{.ModulePath}}' " + "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" + "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" + @@ -86,6 +92,16 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { p.config.StagingDir = "/tmp/packer-puppet-masterless" } + if p.config.WorkingDir == "" { + p.config.WorkingDir = p.config.StagingDir + } + + if p.config.Facter == nil { + p.config.Facter = make(map[string]string) + } + p.config.Facter["packer_build_name"] = p.config.PackerBuildName + p.config.Facter["packer_builder_type"] = p.config.PackerBuilderType + // Validation var errs *packer.MultiError if p.config.HieraConfigPath != "" { @@ -200,6 +216,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { ManifestFile: remoteManifestFile, ModulePath: strings.Join(modulePaths, ":"), Sudo: !p.config.PreventSudo, + WorkingDir: p.config.WorkingDir, } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { diff --git a/provisioner/puppet-masterless/provisioner_test.go b/provisioner/puppet-masterless/provisioner_test.go index 0d5576b6b..42ddd9d7a 100644 --- a/provisioner/puppet-masterless/provisioner_test.go +++ b/provisioner/puppet-masterless/provisioner_test.go @@ -133,3 +133,47 @@ func TestProvisionerPrepare_modulePaths(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerPrepare_facterFacts(t *testing.T) { + config := testConfig() + + delete(config, "facter") + p := new(Provisioner) + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test with malformed fact + config["facter"] = "fact=stringified" + p = new(Provisioner) + err = p.Prepare(config) + if err == nil { + t.Fatal("should be an error") + } + + // Test with a good one + td, err := ioutil.TempDir("", "packer") + if err != nil { + t.Fatalf("error: %s", err) + } + defer os.RemoveAll(td) + + facts := make(map[string]string) + facts["fact_name"] = "fact_value" + config["facter"] = facts + + p = new(Provisioner) + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure the default facts are present + delete(config, "facter") + p = new(Provisioner) + err = p.Prepare(config) + if p.config.Facter == nil { + t.Fatalf("err: Default facts are not set in the Puppet provisioner!") + } +} diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go index baedd645a..338092755 100644 --- a/provisioner/shell/provisioner.go +++ b/provisioner/shell/provisioner.go @@ -266,12 +266,25 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return err } - // Close the original file since we copied it - f.Close() - if cmd.ExitStatus != 0 { return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } + + // Delete the temporary file we created + cmd = &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -f %s", p.config.RemotePath), + } 
+ if err := comm.Start(cmd); err != nil { + return fmt.Errorf( + "Error removing temporary script at %s: %s", + p.config.RemotePath, err) + } + cmd.Wait() + if cmd.ExitStatus != 0 { + return fmt.Errorf( + "Error removing temporary script at %s!", + p.config.RemotePath) + } } return nil diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown index af3ece59e..6c7840575 100644 --- a/website/source/docs/builders/amazon-ebs.html.markdown +++ b/website/source/docs/builders/amazon-ebs.html.markdown @@ -62,10 +62,25 @@ each category, the available configuration keys are alphabetized. * `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" - (integer). + + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information + * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -133,11 +148,17 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. @@ -255,3 +276,7 @@ Here is an example using the optional AMI tags. 
This will add the tags } } ``` + + +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown index 3ca82731b..ff9e7c9a2 100644 --- a/website/source/docs/builders/amazon-instance.html.markdown +++ b/website/source/docs/builders/amazon-instance.html.markdown @@ -82,10 +82,24 @@ each category, the available configuration keys are alphabetized. * `ami_block_device_mappings` (array of block device mappings) - Add the block device mappings to the AMI. The block device mappings allow for keys: - "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string), - "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination" - (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" (integer). - See [amazon-ebs](/docs/builders/amazon-ebs.html) for an example template. + + - `device_name` (string) – The device name exposed to the instance (for + example, "/dev/sdh" or "xvdh") + - `virtual_name` (string) – The virtual device name. See the documentation on + [Block Device Mapping][1] for more information + - `snapshot_id` (string) – The ID of the snapshot + - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD) + volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic + volumes + - `volume_size` (integer) – The size of the volume, in GiB. Required if not + specifying a `snapshot_id` + - `delete_on_termination` (boolean) – Indicates whether the EBS volume is + deleted on instance termination + - `encrypted` (boolean) – Indicates whether to encrypt the volume or not + - `no_device` (boolean) – Suppresses the specified device included in the + block device mapping of the AMI + - `iops` (integer) – The number of I/O operations per second (IOPS) that the + volume supports. See the documentation on [IOPs][2] for more information * `ami_description` (string) - The description to set for the resulting AMI(s). By default this description is empty. @@ -173,11 +187,17 @@ AMI if one with the same name already exists. Default `false`. spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)` +* `ssh_keypair_name` (string) - If specified, this is the key that will be + used for SSH with the machine. By default, this is blank, and Packer will + generate a temporary keypair. `ssh_private_key_file` must be specified + with this. + * `ssh_port` (integer) - The port that SSH will be available on. This defaults to port 22. * `ssh_private_key_file` (string) - Use this ssh private key file instead of - a generated ssh key pair for connecting to the instance. + a generated ssh key pair for connecting to the instance. This key file must + already exist on the `source_ami` * `ssh_private_ip` (bool) - If true, then SSH will always use the private IP if available. @@ -318,3 +338,6 @@ sudo -i -n ec2-upload-bundle \ The available template variables should be self-explanatory based on the parameters they're used to satisfy the `ec2-upload-bundle` command. 
+ +[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html +[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html diff --git a/website/source/docs/builders/parallels-iso.html.markdown b/website/source/docs/builders/parallels-iso.html.markdown index ed7ebd86c..b84123f8b 100644 --- a/website/source/docs/builders/parallels-iso.html.markdown +++ b/website/source/docs/builders/parallels-iso.html.markdown @@ -32,7 +32,7 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio "iso_url": "http://releases.ubuntu.com/12.04/ubuntu-12.04.3-server-amd64.iso", "iso_checksum": "2cbe868812a871242cdcdd8f2fd6feb9", "iso_checksum_type": "md5", - "parallels_tools_flavor": "lin" + "parallels_tools_flavor": "lin", "ssh_username": "packer", "ssh_password": "packer", "ssh_wait_timeout": "30s", diff --git a/website/source/docs/provisioners/chef-client.html.markdown b/website/source/docs/provisioners/chef-client.html.markdown index a2e2f6f5a..9a2a11379 100644 --- a/website/source/docs/provisioners/chef-client.html.markdown +++ b/website/source/docs/provisioners/chef-client.html.markdown @@ -88,6 +88,9 @@ configuration is actually required. this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `client_key` (string) - Path to the client key. If not set, this defaults to a file + named client.pem in `staging_directory`. + * `validation_client_name` (string) - Name of the validation client. If not set, this won't be set in the configuration and the default that Chef uses will be used. @@ -158,3 +161,12 @@ curl -L https://www.opscode.com/chef/install.sh | \ ``` This command can be customized using the `install_command` configuration. + +## Folder Permissions + +!> The `chef-client` provisioner will chmod the directory with your Chef +keys to 777. This is to ensure that Packer can upload and make use of that +directory. However, once the machine is created, you usually don't +want to keep these directories with those permissions. To change the +permissions on the directories, append a shell provisioner after Chef +to modify them. diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index 68034fe00..19fcce9be 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -40,6 +40,10 @@ The available configuration options are listed below. All elements are required. machine. This value must be a writable location and any parent directories must already exist. +* `direction` (string) - The direction of the file transfer. This defaults + to "upload". If it is set to "download", the file at "source" on + the machine will be downloaded locally to "destination". + ## Directory Uploads The file provisioner is also able to upload a complete directory to the diff --git a/website/source/docs/provisioners/puppet-masterless.html.markdown b/website/source/docs/provisioners/puppet-masterless.html.markdown index bc65ae812..08da2c20e 100644 --- a/website/source/docs/provisioners/puppet-masterless.html.markdown +++ b/website/source/docs/provisioners/puppet-masterless.html.markdown @@ -79,12 +79,18 @@ Optional parameters: this folder. If the permissions are not correct, use a shell provisioner prior to this to configure it properly. +* `working_directory` (string) - This is the directory from which the puppet command + will be run. 
When using Hiera with a relative path, this option lets you ensure + that the paths resolve properly. If not specified, this defaults to the value of + `staging_directory` (or its default value if that is not specified either). + ## Execute Command By default, Packer uses the following command (broken across multiple lines for readability) to execute Puppet: ```liquid +cd {{.WorkingDir}} && \ {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \ --verbose \ --modulepath='{{.ModulePath}}' \ @@ -98,6 +104,7 @@ This command can be customized using the `execute_command` configuration. As you can see from the default value above, the value of this configuration can contain various template variables, defined below: +* `WorkingDir` - The path from which Puppet will be executed. * `FacterVars` - Shell-friendly string of environment variables used to set custom facts configured for this provisioner. * `HieraConfigPath` - The path to a Hiera configuration file. @@ -106,3 +113,17 @@ can contain various template variables, defined below: * `ModulePath` - The paths to the module directories. * `Sudo` - A boolean of whether to `sudo` the command or not, depending on the value of the `prevent_sudo` configuration. + +## Default Facts + +In addition to being able to specify custom Facter facts using the `facter` +configuration, the provisioner automatically defines certain commonly useful +facts: + +* `packer_build_name` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them in your Hiera hierarchy. + +* `packer_builder_type` is the type of the builder that was used to create the + machine that Puppet is running on. This is useful if you want to run only + certain parts of your Puppet code on systems built with certain builders.
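To see how the `working_directory` option and the default facts fit together, here is a small sketch that renders a trimmed version of the default execute command with `interpolate.Render`, the same helper the provisioner itself uses. The fact values, paths, and the shortened command template are illustrative assumptions, not output from a real build:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/packer/template/interpolate"
)

// Mirrors the provisioner's ExecuteTemplate struct from this diff.
type ExecuteTemplate struct {
	WorkingDir      string
	FacterVars      string
	HieraConfigPath string
	ModulePath      string
	ManifestFile    string
	ManifestDir     string
	Sudo            bool
}

func main() {
	// A trimmed version of the default ExecuteCommand shown above;
	// the hiera_config and manifestdir flags are omitted for brevity.
	command := "cd {{.WorkingDir}} && " +
		"{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" +
		"puppet apply --verbose --modulepath='{{.ModulePath}}' " +
		"{{.ManifestFile}}"

	ctx := interpolate.Context{}
	ctx.Data = &ExecuteTemplate{
		// The provisioner fills these in at run time; the values
		// here are made up for illustration.
		WorkingDir: "/tmp/packer-puppet-masterless",
		FacterVars: "FACTER_packer_build_name='example' " +
			"FACTER_packer_builder_type='file'",
		ModulePath:   "/tmp/packer-puppet-masterless/module-0",
		ManifestFile: "/tmp/packer-puppet-masterless/manifests/site.pp",
		Sudo:         true,
	}

	rendered, err := interpolate.Render(command, &ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(rendered)
}
```

Running this prints a single shell command that first changes into the working directory, then exports the two default facts as `FACTER_`-prefixed environment variables, and finally invokes `puppet apply` under `sudo -E`, which is the shape of the command the provisioner hands to the communicator.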