Merge branch 'master' into f-file-builder
Commit: f2f8ad16c0

CHANGELOG.md (10 changed lines)
@@ -24,6 +24,8 @@ FEATURES:
  to allow access to remote servers such as private git repos. [GH-1066]
* **Docker builder supports SSH**: The Docker builder now supports containers
  with SSH, just set `communicator` to "ssh" [GH-2244]
* **File provisioner can download**: The file provisioner can now download
  files out of the build process. [GH-1909]
* **New config function: `build_name`**: The name of the currently running
  build. [GH-2232]
* **New config function: `build_type`**: The type of the currently running
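As an illustration of the Docker SSH entry above, here is a minimal, hypothetical template sketch: the image name and credentials are placeholders, and the image is assumed to already run an SSH server so that `"communicator": "ssh"` can connect.

```json
{
  "builders": [
    {
      "type": "docker",
      "image": "an-image-with-sshd:latest",
      "commit": true,
      "communicator": "ssh",
      "ssh_username": "root",
      "ssh_password": "example"
    }
  ]
}
```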
@@ -39,6 +41,7 @@ IMPROVEMENTS:
* builder/amazon: Add `force_deregister` option for automatic AMI
  deregistration [GH-2221]
* builder/amazon: Now applies tags to EBS snapshots [GH-2212]
* builder/amazon: Support custom keypairs [GH-1837]
* builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829]
* builder/digitalocean: User data support [GH-2113]
* builder/parallels: Support Parallels Desktop 11 [GH-2199]
@@ -55,12 +58,16 @@ IMPROVEMENTS:
  automatic port forward for SSH and to use the guest port directly. [GH-1078]
* builder/virtualbox: Added SCSI support
* builder/vmware: Support for additional disks [GH-1382]
* builder/vmware: Can now customize the template used for adding disks [GH-2254]
* command/fix: After fixing, the template is validated [GH-2228]
* command/push: Add `-name` flag for specifying name from CLI [GH-2042]
* command/push: Push configuration in templates supports variables [GH-1861]
* post-processor/docker-save: Can be chained [GH-2179]
* post-processor/docker-tag: Support `force` option [GH-2055]
* post-processor/docker-tag: Can be chained [GH-2179]
* provisioner/puppet-masterless: `working_directory` option [GH-1831]
* provisioner/puppet-masterless: `packer_build_name` and
  `packer_build_type` are default facts. [GH-1878]

BUG FIXES:
@@ -121,11 +128,14 @@ BUG FIXES:
* post-processor/atlas: Fix index out of range panic [GH-1959]
* post-processor/vagrant-cloud: Fixed failing on response
* post-processor/vagrant-cloud: Don't delete version on error [GH-2014]
* provisioner/chef-client: Fix permissions issues on default dir [GH-2255]
* provisioner/chef-client: Node cleanup works now. [GH-2257]
* provisioner/puppet-masterless: Allow manifest_file to be a directory
* provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call
* provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708]
* provisioner/shell: inline commands failing will fail the provisioner [GH-2069]
* provisioner/shell: single quotes in env vars are escaped [GH-2229]
* provisioner/shell: Temporary file is deleted after run [GH-2259]

## 0.7.5 (December 9, 2014)
Makefile (8 changed lines)
@@ -31,7 +31,13 @@ testrace:
  go test -race $(TEST) $(TESTARGS)

updatedeps:
  go get -d -v -p 2 ./...
  go get -u github.com/mitchellh/gox
  go get -u golang.org/x/tools/cmd/stringer
  go list ./... \
    | xargs go list -f '{{join .Deps "\n"}}' \
    | grep -v github.com/mitchellh/packer \
    | sort -u \
    | xargs go get -f -u -v

vet:
  @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
@@ -32,12 +32,14 @@ type RunConfig struct {
  VpcId string `mapstructure:"vpc_id"`

  // Communicator settings
  Comm communicator.Config `mapstructure:",squash"`
  SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
  Comm communicator.Config `mapstructure:",squash"`
  SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
  SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
}

func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
  if c.TemporaryKeyPairName == "" {
  // if we are not given an explicit keypairname, create a temporary one
  if c.SSHKeyPairName == "" {
    c.TemporaryKeyPairName = fmt.Sprintf(
      "packer %s", uuid.TimeOrderedUUID())
  }
@@ -12,17 +12,20 @@ import (
)

type StepKeyPair struct {
  Debug bool
  DebugKeyPath string
  KeyPairName string
  PrivateKeyFile string
  Debug bool
  DebugKeyPath string
  TemporaryKeyPairName string
  KeyPairName string
  PrivateKeyFile string

  keyName string
}

func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
  if s.PrivateKeyFile != "" {
    s.keyName = ""
    if s.KeyPairName != "" {
      s.keyName = s.KeyPairName // need to get from config
    }

    privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
    if err != nil {
@@ -30,7 +33,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
      return multistep.ActionHalt
    }

    state.Put("keyPair", "")
    state.Put("keyPair", s.keyName)
    state.Put("privateKey", string(privateKeyBytes))

    return multistep.ActionContinue
@@ -39,7 +42,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
  ec2conn := state.Get("ec2").(*ec2.EC2)
  ui := state.Get("ui").(packer.Ui)

  ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName))
  ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName))
  keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: &s.KeyPairName})
  if err != nil {
    state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
@@ -47,7 +50,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
  }

  // Set the keyname so we know to delete it later
  s.keyName = s.KeyPairName
  s.keyName = s.TemporaryKeyPairName

  // Set some state data for use in future steps
  state.Put("keyPair", s.keyName)
@@ -84,7 +87,9 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {

func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
  // If no key name is set, then we never created it, so just return
  if s.keyName == "" {
  // If we used an SSH private key file, do not go about deleting
  // keypairs
  if s.PrivateKeyFile != "" {
    return
  }
@@ -245,7 +245,7 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
  }
  latestInstance, err := WaitForState(&stateChange)
  if err != nil {
    err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", *s.instance.InstanceID, err)
    err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
    state.Put("error", err)
    ui.Error(err.Error())
    return multistep.ActionHalt
@@ -88,10 +88,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
      EnhancedNetworking: b.config.AMIEnhancedNetworking,
    },
    &awscommon.StepKeyPair{
      Debug: b.config.PackerDebug,
      DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
      KeyPairName: b.config.TemporaryKeyPairName,
      PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
      Debug: b.config.PackerDebug,
      DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
      KeyPairName: b.config.TemporaryKeyPairName,
      TemporaryKeyPairName: b.config.TemporaryKeyPairName,
      PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
    },
    &awscommon.StepSecurityGroup{
      SecurityGroupIds: b.config.SecurityGroupIds,
@@ -177,10 +177,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
      EnhancedNetworking: b.config.AMIEnhancedNetworking,
    },
    &awscommon.StepKeyPair{
      Debug: b.config.PackerDebug,
      DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
      KeyPairName: b.config.TemporaryKeyPairName,
      PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
      Debug: b.config.PackerDebug,
      DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
      KeyPairName: b.config.TemporaryKeyPairName,
      PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
      TemporaryKeyPairName: b.config.TemporaryKeyPairName,
    },
    &awscommon.StepSecurityGroup{
      CommConfig: &b.config.RunConfig.Comm,
@@ -23,6 +23,10 @@ type StepUploadTools struct {
func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction {
  driver := state.Get("driver").(Driver)

  if c.ToolsUploadFlavor == "" {
    return multistep.ActionContinue
  }

  if c.RemoteType == "esx5" {
    if err := driver.ToolsInstall(); err != nil {
      state.Put("error", fmt.Errorf("Couldn't mount VMware tools ISO. Please check the 'guest_os_type' in your template.json."))
@@ -30,10 +34,6 @@ func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction {
    return multistep.ActionContinue
  }

  if c.ToolsUploadFlavor == "" {
    return multistep.ActionContinue
  }

  comm := state.Get("communicator").(packer.Communicator)
  tools_source := state.Get("tools_upload_source").(string)
  ui := state.Get("ui").(packer.Ui)
@@ -36,20 +36,21 @@ type Config struct {
  vmwcommon.ToolsConfig `mapstructure:",squash"`
  vmwcommon.VMXConfig `mapstructure:",squash"`

  AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
  DiskName string `mapstructure:"vmdk_name"`
  DiskSize uint `mapstructure:"disk_size"`
  DiskTypeId string `mapstructure:"disk_type_id"`
  FloppyFiles []string `mapstructure:"floppy_files"`
  GuestOSType string `mapstructure:"guest_os_type"`
  ISOChecksum string `mapstructure:"iso_checksum"`
  ISOChecksumType string `mapstructure:"iso_checksum_type"`
  ISOUrls []string `mapstructure:"iso_urls"`
  Version string `mapstructure:"version"`
  VMName string `mapstructure:"vm_name"`
  BootCommand []string `mapstructure:"boot_command"`
  SkipCompaction bool `mapstructure:"skip_compaction"`
  VMXTemplatePath string `mapstructure:"vmx_template_path"`
  AdditionalDiskSize []uint `mapstructure:"disk_additional_size"`
  DiskName string `mapstructure:"vmdk_name"`
  DiskSize uint `mapstructure:"disk_size"`
  DiskTypeId string `mapstructure:"disk_type_id"`
  FloppyFiles []string `mapstructure:"floppy_files"`
  GuestOSType string `mapstructure:"guest_os_type"`
  ISOChecksum string `mapstructure:"iso_checksum"`
  ISOChecksumType string `mapstructure:"iso_checksum_type"`
  ISOUrls []string `mapstructure:"iso_urls"`
  Version string `mapstructure:"version"`
  VMName string `mapstructure:"vm_name"`
  BootCommand []string `mapstructure:"boot_command"`
  SkipCompaction bool `mapstructure:"skip_compaction"`
  VMXTemplatePath string `mapstructure:"vmx_template_path"`
  VMXDiskTemplatePath string `mapstructure:"vmx_disk_template_path"`

  RemoteType string `mapstructure:"remote_type"`
  RemoteDatastore string `mapstructure:"remote_datastore"`
@@ -76,7 +76,29 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction {
      DiskName: config.DiskName,
    }

    diskTemplate, err := interpolate.Render(DefaultAdditionalDiskTemplate, &ctx)
    diskTemplate := DefaultAdditionalDiskTemplate
    if config.VMXDiskTemplatePath != "" {
      f, err := os.Open(config.VMXDiskTemplatePath)
      if err != nil {
        err := fmt.Errorf("Error reading VMX disk template: %s", err)
        state.Put("error", err)
        ui.Error(err.Error())
        return multistep.ActionHalt
      }
      defer f.Close()

      rawBytes, err := ioutil.ReadAll(f)
      if err != nil {
        err := fmt.Errorf("Error reading VMX disk template: %s", err)
        state.Put("error", err)
        ui.Error(err.Error())
        return multistep.ActionHalt
      }

      diskTemplate = string(rawBytes)
    }

    diskContents, err := interpolate.Render(diskTemplate, &ctx)
    if err != nil {
      err := fmt.Errorf("Error preparing VMX template for additional disk: %s", err)
      state.Put("error", err)
@@ -84,7 +106,7 @@ func (s *stepCreateVMX) Run(state multistep.StateBag) multistep.StepAction {
      return multistep.ActionHalt
    }

    vmxTemplate += diskTemplate
    vmxTemplate += diskContents
    }
  }
@@ -23,9 +23,11 @@ type StepProvision struct {
func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction {
  comm := s.Comm
  if comm == nil {
    comm = state.Get("communicator").(packer.Communicator)
    raw, ok := state.Get("communicator").(packer.Communicator)
    if ok {
      comm = raw.(packer.Communicator)
    }
  }

  hook := state.Get("hook").(packer.Hook)
  ui := state.Get("ui").(packer.Ui)
@@ -14,6 +14,7 @@ import (
  "net"
  "os"
  "path/filepath"
  "strconv"
  "sync"
)
@@ -171,8 +172,57 @@ func (c *comm) UploadDir(dst string, src string, excl []string) error {
  return c.scpSession("scp -rvt "+dst, scpFunc)
}

func (c *comm) Download(string, io.Writer) error {
  panic("not implemented yet")
func (c *comm) Download(path string, output io.Writer) error {
  scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
    fmt.Fprint(w, "\x00")

    // read file info
    fi, err := stdoutR.ReadString('\n')
    if err != nil {
      return err
    }

    if len(fi) < 0 {
      return fmt.Errorf("empty response from server")
    }

    switch fi[0] {
    case '\x01', '\x02':
      return fmt.Errorf("%s", fi[1:len(fi)])
    case 'C':
    case 'D':
      return fmt.Errorf("remote file is directory")
    default:
      return fmt.Errorf("unexpected server response (%x)", fi[0])
    }

    var mode string
    var size int64

    n, err := fmt.Sscanf(fi, "%6s %d ", &mode, &size)
    if err != nil || n != 2 {
      return fmt.Errorf("can't parse server response (%s)", fi)
    }
    if size < 0 {
      return fmt.Errorf("negative file size")
    }

    fmt.Fprint(w, "\x00")

    if _, err := io.CopyN(output, stdoutR, size); err != nil {
      return err
    }

    fmt.Fprint(w, "\x00")

    if err := checkSCPStatus(stdoutR); err != nil {
      return err
    }

    return nil
  }

  return c.scpSession("scp -vf "+strconv.Quote(path), scpFunc)
}

func (c *comm) newSession() (session *ssh.Session, err error) {
@@ -113,7 +113,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
}

func (c *Communicator) Download(src string, dst io.Writer) error {
  panic("download not implemented")
  return fmt.Errorf("WinRM doesn't support download.")
}

func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) {
@@ -202,7 +202,7 @@ func TestBuild_Run(t *testing.T) {
  }

  // Verify provisioners run
  dispatchHook.Run(HookProvision, nil, nil, 42)
  dispatchHook.Run(HookProvision, nil, new(MockCommunicator), 42)
  prov := build.provisioners[0].provisioner.(*MockProvisioner)
  if !prov.ProvCalled {
    t.Fatal("should be called")
@@ -43,7 +43,7 @@ func (tb *MockBuilder) Run(ui Ui, h Hook, c Cache) (Artifact, error) {
  }

  if h != nil {
    if err := h.Run(HookProvision, ui, nil, nil); err != nil {
    if err := h.Run(HookProvision, ui, new(MockCommunicator), nil); err != nil {
      return nil, err
    }
  }
@@ -38,6 +38,18 @@ type ProvisionHook struct {

// Runs the provisioners in order.
func (h *ProvisionHook) Run(name string, ui Ui, comm Communicator, data interface{}) error {
  // Shortcut
  if len(h.Provisioners) == 0 {
    return nil
  }

  if comm == nil {
    return fmt.Errorf(
      "No communicator found for provisioners! This is usually because the\n" +
        "`communicator` config was set to \"none\". If you have any provisioners\n" +
        "then a communicator is required. Please fix this to continue.")
  }

  defer func() {
    h.lock.Lock()
    defer h.lock.Unlock()
@@ -19,7 +19,7 @@ func TestProvisionHook(t *testing.T) {
  pB := &MockProvisioner{}

  ui := testUi()
  var comm Communicator = nil
  var comm Communicator = new(MockCommunicator)
  var data interface{} = nil

  hook := &ProvisionHook{
@@ -37,6 +37,24 @@ func TestProvisionHook(t *testing.T) {
  }
}

func TestProvisionHook_nilComm(t *testing.T) {
  pA := &MockProvisioner{}
  pB := &MockProvisioner{}

  ui := testUi()
  var comm Communicator = nil
  var data interface{} = nil

  hook := &ProvisionHook{
    Provisioners: []Provisioner{pA, pB},
  }

  err := hook.Run("foo", ui, comm, data)
  if err == nil {
    t.Fatal("should error")
  }
}

func TestProvisionHook_cancel(t *testing.T) {
  var lock sync.Mutex
  order := make([]string, 0, 2)
@@ -59,7 +77,7 @@ func TestProvisionHook_cancel(t *testing.T) {

  finished := make(chan struct{})
  go func() {
    hook.Run("foo", nil, nil, nil)
    hook.Run("foo", nil, new(MockCommunicator), nil)
    close(finished)
  }()
@@ -74,7 +92,7 @@ func TestProvisionHook_cancel(t *testing.T) {
  <-finished

  // Verify order
  if order[0] != "cancel" || order[1] != "prov" {
  if len(order) != 2 || order[0] != "cancel" || order[1] != "prov" {
    t.Fatalf("bad: %#v", order)
  }
}
@@ -9,7 +9,6 @@ import (
  "fmt"
  "io/ioutil"
  "os"
  "os/exec"
  "path/filepath"
  "strings"
@@ -37,6 +36,7 @@ type Config struct {
  SkipCleanNode bool `mapstructure:"skip_clean_node"`
  SkipInstall bool `mapstructure:"skip_install"`
  StagingDir string `mapstructure:"staging_directory"`
  ClientKey string `mapstructure:"client_key"`
  ValidationKeyPath string `mapstructure:"validation_key_path"`
  ValidationClientName string `mapstructure:"validation_client_name"`
@@ -50,6 +50,7 @@ type Provisioner struct {
type ConfigTemplate struct {
  NodeName string
  ServerUrl string
  ClientKey string
  ValidationKeyPath string
  ValidationClientName string
  ChefEnvironment string
@@ -162,6 +163,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    return fmt.Errorf("Error creating staging directory: %s", err)
  }

  if p.config.ClientKey == "" {
    p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir)
  }

  if p.config.ValidationKeyPath != "" {
    remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
    if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil {
@@ -170,7 +175,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
  }

  configPath, err := p.createConfig(
    ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode)
    ui, comm, nodeName, serverUrl, p.config.ClientKey, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode)
  if err != nil {
    return fmt.Errorf("Error creating Chef config file: %s", err)
  }
@@ -224,7 +229,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds
  return comm.UploadDir(dst, src, nil)
}

func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) {
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) {
  ui.Message("Creating configuration file 'client.rb'")

  // Read the template
@@ -248,6 +253,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN
  ctx.Data = &ConfigTemplate{
    NodeName: nodeName,
    ServerUrl: serverUrl,
    ClientKey: clientKey,
    ValidationKeyPath: remoteKeyPath,
    ValidationClientName: validationClientName,
    ChefEnvironment: chefEnvironment,
@@ -303,16 +309,25 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri
    mkdirCmd = "sudo " + mkdirCmd
  }

  cmd := &packer.RemoteCmd{
    Command: mkdirCmd,
  }

  cmd := &packer.RemoteCmd{Command: mkdirCmd}
  if err := cmd.StartWithUi(comm, ui); err != nil {
    return err
  }

  if cmd.ExitStatus != 0 {
    return fmt.Errorf("Non-zero exit status.")
    return fmt.Errorf("Non-zero exit status. See output above for more info.")
  }

  // Chmod the directory to 0777 just so that we can access it as our user
  mkdirCmd = fmt.Sprintf("chmod 0777 '%s'", dir)
  if !p.config.PreventSudo {
    mkdirCmd = "sudo " + mkdirCmd
  }
  cmd = &packer.RemoteCmd{Command: mkdirCmd}
  if err := cmd.StartWithUi(comm, ui); err != nil {
    return err
  }
  if cmd.ExitStatus != 0 {
    return fmt.Errorf("Non-zero exit status. See output above for more info.")
  }

  return nil
@@ -320,15 +335,9 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri

func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error {
  ui.Say("Cleaning up chef node...")
  app := fmt.Sprintf("knife node delete %s -y", node)

  cmd := exec.Command("sh", "-c", app)
  out, err := cmd.Output()

  ui.Message(fmt.Sprintf("%s", out))

  if err != nil {
    return err
  args := []string{"node", "delete", node}
  if err := p.knifeExec(ui, comm, node, args); err != nil {
    return fmt.Errorf("Failed to cleanup node: %s", err)
  }

  return nil
@@ -336,16 +345,38 @@ func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node str

func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error {
  ui.Say("Cleaning up chef client...")
  app := fmt.Sprintf("knife client delete %s -y", node)
  args := []string{"client", "delete", node}
  if err := p.knifeExec(ui, comm, node, args); err != nil {
    return fmt.Errorf("Failed to cleanup client: %s", err)
  }

  cmd := exec.Command("sh", "-c", app)
  out, err := cmd.Output()
  return nil
}

  ui.Message(fmt.Sprintf("%s", out))
func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, args []string) error {
  flags := []string{
    "-y",
    "-s", fmt.Sprintf("'%s'", p.config.ServerUrl),
    "-k", fmt.Sprintf("'%s'", p.config.ClientKey),
    "-u", fmt.Sprintf("'%s'", node),
  }

  if err != nil {
  cmdText := fmt.Sprintf(
    "knife %s %s", strings.Join(args, " "), strings.Join(flags, " "))
  if !p.config.PreventSudo {
    cmdText = "sudo " + cmdText
  }

  cmd := &packer.RemoteCmd{Command: cmdText}
  if err := cmd.StartWithUi(comm, ui); err != nil {
    return err
  }
  if cmd.ExitStatus != 0 {
    return fmt.Errorf(
      "Non-zero exit status. See output above for more info.\n\n"+
        "Command: %s",
      cmdText)
  }

  return nil
}
@@ -524,6 +555,7 @@ var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
@@ -20,6 +20,9 @@ type Config struct {
  // The remote path where the local file will be uploaded to.
  Destination string

  // Direction
  Direction string

  ctx interpolate.Context
}
@@ -38,12 +41,28 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
    return err
  }

  if p.config.Direction == "" {
    p.config.Direction = "upload"
  }

  var errs *packer.MultiError
  if _, err := os.Stat(p.config.Source); err != nil {
    errs = packer.MultiErrorAppend(errs,
      fmt.Errorf("Bad source '%s': %s", p.config.Source, err))
  }

  if p.config.Direction != "download" && p.config.Direction != "upload" {
    errs = packer.MultiErrorAppend(errs,
      errors.New("Direction must be one of: download, upload."))
  }

  if p.config.Direction == "upload" {
    if _, err := os.Stat(p.config.Source); err != nil {
      errs = packer.MultiErrorAppend(errs,
        fmt.Errorf("Bad source '%s': %s", p.config.Source, err))
    }
  }

  if p.config.Destination == "" {
    errs = packer.MultiErrorAppend(errs,
      errors.New("Destination must be specified."))
@@ -57,6 +76,30 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
}

func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
  if p.config.Direction == "download" {
    return p.ProvisionDownload(ui, comm)
  } else {
    return p.ProvisionUpload(ui, comm)
  }
}

func (p *Provisioner) ProvisionDownload(ui packer.Ui, comm packer.Communicator) error {
  ui.Say(fmt.Sprintf("Downloading %s => %s", p.config.Source, p.config.Destination))

  f, err := os.OpenFile(p.config.Destination, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
  if err != nil {
    return err
  }
  defer f.Close()

  err = comm.Download(p.config.Source, f)
  if err != nil {
    ui.Error(fmt.Sprintf("Download failed: %s", err))
  }
  return err
}

func (p *Provisioner) ProvisionUpload(ui packer.Ui, comm packer.Communicator) error {
  ui.Say(fmt.Sprintf("Uploading %s => %s", p.config.Source, p.config.Destination))
  info, err := os.Stat(p.config.Source)
  if err != nil {
@@ -44,6 +44,10 @@ type Config struct {
  // The directory where files will be uploaded. Packer requires write
  // permissions in this directory.
  StagingDir string `mapstructure:"staging_directory"`

  // The directory from which the command will be executed.
  // Packer requires the directory to exist when running puppet.
  WorkingDir string `mapstructure:"working_directory"`
}

type Provisioner struct {
@@ -51,6 +55,7 @@ type Provisioner struct {
}

type ExecuteTemplate struct {
  WorkingDir string
  FacterVars string
  HieraConfigPath string
  ModulePath string
@@ -74,7 +79,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {

  // Set some defaults
  if p.config.ExecuteCommand == "" {
    p.config.ExecuteCommand = "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" +
    p.config.ExecuteCommand = "cd {{.WorkingDir}} && " +
      "{{.FacterVars}} {{if .Sudo}} sudo -E {{end}}" +
      "puppet apply --verbose --modulepath='{{.ModulePath}}' " +
      "{{if ne .HieraConfigPath \"\"}}--hiera_config='{{.HieraConfigPath}}' {{end}}" +
      "{{if ne .ManifestDir \"\"}}--manifestdir='{{.ManifestDir}}' {{end}}" +
@@ -86,6 +92,16 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
    p.config.StagingDir = "/tmp/packer-puppet-masterless"
  }

  if p.config.WorkingDir == "" {
    p.config.WorkingDir = p.config.StagingDir
  }

  if p.config.Facter == nil {
    p.config.Facter = make(map[string]string)
  }
  p.config.Facter["packer_build_name"] = p.config.PackerBuildName
  p.config.Facter["packer_builder_type"] = p.config.PackerBuilderType

  // Validation
  var errs *packer.MultiError
  if p.config.HieraConfigPath != "" {
@@ -200,6 +216,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    ManifestFile: remoteManifestFile,
    ModulePath: strings.Join(modulePaths, ":"),
    Sudo: !p.config.PreventSudo,
    WorkingDir: p.config.WorkingDir,
  }
  command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
  if err != nil {
@@ -133,3 +133,47 @@ func TestProvisionerPrepare_modulePaths(t *testing.T) {
    t.Fatalf("err: %s", err)
  }
}

func TestProvisionerPrepare_facterFacts(t *testing.T) {
  config := testConfig()

  delete(config, "facter")
  p := new(Provisioner)
  err := p.Prepare(config)
  if err != nil {
    t.Fatalf("err: %s", err)
  }

  // Test with malformed fact
  config["facter"] = "fact=stringified"
  p = new(Provisioner)
  err = p.Prepare(config)
  if err == nil {
    t.Fatal("should be an error")
  }

  // Test with a good one
  td, err := ioutil.TempDir("", "packer")
  if err != nil {
    t.Fatalf("error: %s", err)
  }
  defer os.RemoveAll(td)

  facts := make(map[string]string)
  facts["fact_name"] = "fact_value"
  config["facter"] = facts

  p = new(Provisioner)
  err = p.Prepare(config)
  if err != nil {
    t.Fatalf("err: %s", err)
  }

  // Make sure the default facts are present
  delete(config, "facter")
  p = new(Provisioner)
  err = p.Prepare(config)
  if p.config.Facter == nil {
    t.Fatalf("err: Default facts are not set in the Puppet provisioner!")
  }
}
@@ -266,12 +266,25 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
      return err
    }

    // Close the original file since we copied it
    f.Close()

    if cmd.ExitStatus != 0 {
      return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
    }

    // Delete the temporary file we created
    cmd = &packer.RemoteCmd{
      Command: fmt.Sprintf("rm -f %s", p.config.RemotePath),
    }
    if err := comm.Start(cmd); err != nil {
      return fmt.Errorf(
        "Error removing temporary script at %s: %s",
        p.config.RemotePath, err)
    }
    cmd.Wait()
    if cmd.ExitStatus != 0 {
      return fmt.Errorf(
        "Error removing temporary script at %s!",
        p.config.RemotePath)
    }
  }

  return nil
@@ -62,10 +62,25 @@ each category, the available configuration keys are alphabetized.

* `ami_block_device_mappings` (array of block device mappings) - Add the block
  device mappings to the AMI. The block device mappings allow for keys:
  "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string),
  "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination"
  (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops"
  (integer).

  - `device_name` (string) – The device name exposed to the instance (for
    example, "/dev/sdh" or "xvdh")
  - `virtual_name` (string) – The virtual device name. See the documentation on
    [Block Device Mapping][1] for more information
  - `snapshot_id` (string) – The ID of the snapshot
  - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD)
    volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic
    volumes
  - `volume_size` (integer) – The size of the volume, in GiB. Required if not
    specifying a `snapshot_id`
  - `delete_on_termination` (boolean) – Indicates whether the EBS volume is
    deleted on instance termination
  - `encrypted` (boolean) – Indicates whether to encrypt the volume or not
  - `no_device` (boolean) – Suppresses the specified device included in the
    block device mapping of the AMI
  - `iops` (integer) – The number of I/O operations per second (IOPS) that the
    volume supports. See the documentation on [IOPs][2] for more information

* `ami_description` (string) - The description to set for the resulting
  AMI(s). By default this description is empty.
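A short, hypothetical fragment using the `ami_block_device_mappings` keys listed above; the device name and sizes are illustrative only.

```json
{
  "ami_block_device_mappings": [
    {
      "device_name": "/dev/sdh",
      "volume_type": "gp2",
      "volume_size": 40,
      "delete_on_termination": true
    }
  ]
}
```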
@@ -133,11 +148,17 @@ AMI if one with the same name already exists. Default `false`.
  spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

* `ssh_keypair_name` (string) - If specified, this is the key that will be
  used for SSH with the machine. By default, this is blank, and Packer will
  generate a temporary keypair. `ssh_private_key_file` must be specified
  with this.

* `ssh_port` (integer) - The port that SSH will be available on. This defaults
  to port 22.

* `ssh_private_key_file` (string) - Use this ssh private key file instead of
  a generated ssh key pair for connecting to the instance.
  a generated ssh key pair for connecting to the instance. This key file must
  already exist on the `source_ami`

* `ssh_private_ip` (bool) - If true, then SSH will always use the private
  IP if available.
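A hypothetical fragment combining the two SSH options described above; the key pair name, key path, and username are placeholders. As the docs note, `ssh_keypair_name` requires `ssh_private_key_file`, and the key must already be present on the `source_ami`.

```json
{
  "type": "amazon-ebs",
  "ssh_keypair_name": "my-existing-keypair",
  "ssh_private_key_file": "/home/me/.ssh/my-existing-keypair.pem",
  "ssh_username": "ubuntu"
}
```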
@@ -255,3 +276,7 @@ Here is an example using the optional AMI tags. This will add the tags
  }
}
```

[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
@@ -82,10 +82,24 @@ each category, the available configuration keys are alphabetized.

* `ami_block_device_mappings` (array of block device mappings) - Add the block
  device mappings to the AMI. The block device mappings allow for keys:
  "device\_name" (string), "virtual\_name" (string), "snapshot\_id" (string),
  "volume\_type" (string), "volume\_size" (integer), "delete\_on\_termination"
  (boolean), "encrypted" (boolean), "no\_device" (boolean), and "iops" (integer).
  See [amazon-ebs](/docs/builders/amazon-ebs.html) for an example template.

  - `device_name` (string) – The device name exposed to the instance (for
    example, "/dev/sdh" or "xvdh")
  - `virtual_name` (string) – The virtual device name. See the documentation on
    [Block Device Mapping][1] for more information
  - `snapshot_id` (string) – The ID of the snapshot
  - `volume_type` (string) – The volume type. gp2 for General Purpose (SSD)
    volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic
    volumes
  - `volume_size` (integer) – The size of the volume, in GiB. Required if not
    specifying a `snapshot_id`
  - `delete_on_termination` (boolean) – Indicates whether the EBS volume is
    deleted on instance termination
  - `encrypted` (boolean) – Indicates whether to encrypt the volume or not
  - `no_device` (boolean) – Suppresses the specified device included in the
    block device mapping of the AMI
  - `iops` (integer) – The number of I/O operations per second (IOPS) that the
    volume supports. See the documentation on [IOPs][2] for more information

* `ami_description` (string) - The description to set for the resulting
  AMI(s). By default this description is empty.
@@ -173,11 +187,17 @@ AMI if one with the same name already exists. Default `false`.
  spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

* `ssh_keypair_name` (string) - If specified, this is the key that will be
  used for SSH with the machine. By default, this is blank, and Packer will
  generate a temporary keypair. `ssh_private_key_file` must be specified
  with this.

* `ssh_port` (integer) - The port that SSH will be available on. This defaults
  to port 22.

* `ssh_private_key_file` (string) - Use this ssh private key file instead of
  a generated ssh key pair for connecting to the instance.
  a generated ssh key pair for connecting to the instance. This key file must
  already exist on the `source_ami`

* `ssh_private_ip` (bool) - If true, then SSH will always use the private
  IP if available.
@@ -318,3 +338,6 @@ sudo -i -n ec2-upload-bundle \

The available template variables should be self-explanatory based on the
parameters they're used to satisfy the `ec2-upload-bundle` command.

[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
@@ -32,7 +32,7 @@ Ubuntu to self-install. Still, the example serves to show the basic configuratio
  "iso_url": "http://releases.ubuntu.com/12.04/ubuntu-12.04.3-server-amd64.iso",
  "iso_checksum": "2cbe868812a871242cdcdd8f2fd6feb9",
  "iso_checksum_type": "md5",
  "parallels_tools_flavor": "lin"
  "parallels_tools_flavor": "lin",
  "ssh_username": "packer",
  "ssh_password": "packer",
  "ssh_wait_timeout": "30s",
@@ -88,6 +88,9 @@ configuration is actually required.
  this folder. If the permissions are not correct, use a shell provisioner
  prior to this to configure it properly.

* `client_key` (string) - Path to client key. If not set, this defaults to a file
  named client.pem in `staging_directory`.

* `validation_client_name` (string) - Name of the validation client. If
  not set, this won't be set in the configuration and the default that Chef
  uses will be used.
@@ -158,3 +161,12 @@ curl -L https://www.opscode.com/chef/install.sh | \
```

This command can be customized using the `install_command` configuration.

## Folder Permissions

!> The `chef-client` provisioner will chmod the directory with your Chef
keys to 777. This is to ensure that Packer can upload and make use of that
directory. However, once the machine is created, you usually don't
want to keep these directories with those permissions. To change the
permissions on the directories, append a shell provisioner after Chef
to modify them.
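A sketch of that suggestion, assuming the default `staging_directory` of `/tmp/packer-chef-client` (adjust the path and mode to your own configuration):

```json
{
  "provisioners": [
    {
      "type": "chef-client",
      "server_url": "https://chef.example.com/"
    },
    {
      "type": "shell",
      "inline": ["sudo chmod 0755 /tmp/packer-chef-client"]
    }
  ]
}
```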
@@ -40,6 +40,10 @@ The available configuration options are listed below. All elements are required.
  machine. This value must be a writable location and any parent directories
  must already exist.

* `direction` (string) - The direction of the file transfer. This defaults
  to "upload". If it is set to "download", then the file "source" in
  the machine will be downloaded locally to "destination".

## Directory Uploads

The file provisioner is also able to upload a complete directory to the
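A minimal sketch of a download-direction file provisioner; the paths are illustrative.

```json
{
  "type": "file",
  "source": "/var/log/cloud-init.log",
  "destination": "cloud-init.log",
  "direction": "download"
}
```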
@@ -79,12 +79,18 @@ Optional parameters:
  this folder. If the permissions are not correct, use a shell provisioner
  prior to this to configure it properly.

* `working_directory` (string) - This is the directory from which the puppet command
  will be run. When using hiera with a relative path, this option allows you to ensure
  that the paths are working properly. If not specified, this defaults to the value of
  the specified `staging_directory` (or its default value if not specified either).

## Execute Command

By default, Packer uses the following command (broken across multiple lines
for readability) to execute Puppet:

```liquid
cd {{.WorkingDir}} && \
{{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \
  --verbose \
  --modulepath='{{.ModulePath}}' \
@@ -98,6 +104,7 @@ This command can be customized using the `execute_command` configuration.
As you can see from the default value above, the value of this configuration
can contain various template variables, defined below:

* `WorkingDir` - The path from which Puppet will be executed.
* `FacterVars` - Shell-friendly string of environmental variables used
  to set custom facts configured for this provisioner.
* `HieraConfigPath` - The path to a hiera configuration file.
@@ -106,3 +113,17 @@ can contain various template variables, defined below:
* `ModulePath` - The paths to the module directories.
* `Sudo` - A boolean of whether to `sudo` the command or not, depending on
  the value of the `prevent_sudo` configuration.

## Default Facts

In addition to being able to specify custom Facter facts using the `facter`
configuration, the provisioner automatically defines certain commonly useful
facts:

* `packer_build_name` is set to the name of the build that Packer is running.
  This is most useful when Packer is making multiple builds and you want to
  distinguish them in your Hiera hierarchy.

* `packer_builder_type` is the type of the builder that was used to create the
  machine that Puppet is running on. This is useful if you want to run only
  certain parts of your Puppet code on systems built with certain builders.
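A hypothetical example tying the options above together: `working_directory` set explicitly, one custom `facter` fact, and the two default facts needing no configuration at all. The manifest name and fact values are placeholders.

```json
{
  "type": "puppet-masterless",
  "manifest_file": "site.pp",
  "working_directory": "/tmp/packer-puppet-masterless",
  "facter": {
    "server_role": "webserver"
  }
}
```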