Merge remote-tracking branch 'mitchellh/master'
Commit 6bdba942a5

 CHANGELOG.md | 28

@@ -1,7 +1,33 @@
## (Unreleased)

### BUG FIXES:

* builder/googlecompute: Correct values for `on_host_maintenance`. [GH-4643]
* builder/amazon: Fix crash in `step_region_copy`. [GH-4642]
* core: show correct step name when debugging. [GH-4672]
* builder/virtualbox: fix `none` communicator by allowing skipping upload of
  version file. [GH-4678]
* communicator/ssh: fix nil pointer error. [GH-4690]
* builder/hyper-v: Don't wait for shutdown_command to return. [GH-4691]

### IMPROVEMENTS:

* builder/amazon: validate ssh key name/file. [GH-4665]
* builder/amazon: set force_deregister to true on -force. [GH-4649]
* builder/hyper-v: validate output dir in step, not in config. [GH-4645]
* website: fix display on ios devices. [GH-4618]
* builder/openstack: Add ssh agent support. [GH-4655]
* builder/parallels-iso: Configuration of disk type, plain or expanding.
  [GH-4621]
* builder/ansible: Clearer error message when we have problems getting the
  ansible version. [GH-4694]

## 0.12.3 (March 1, 2017)

### BACKWARDS INCOMPATIBILITIES:

* provisioner/ansible: by default, the staging dir will be randomized. [GH-4472]

### FEATURES:

* **New builder:** `ebs-surrogate` for building AMIs from EBS volumes. [GH-4351]

@@ -18,7 +44,6 @@
* communicator/winrm: support ProxyFromEnvironment. [GH-4463]
* core: make VNC links clickable in terminal. [GH-4497] [GH-4498]
* post-processor/amazon-import: support AMI attributes on import [GH-4216]
* provisioner/ansible: use randomized staging dir [GH-4472]
* communicator/ssh: Use SSH agent when enabled for bastion step. [GH-4598]
* builder/amazon: enable ena when `enhanced_networking` is set. [GH-4578]
* builder/vmware-esxi: try for longer to connect to vnc port. [GH-4480]

@@ -32,7 +57,6 @@
* builder/azure: add two new config variables for temp_compute_name and
  temp_resource_group_name. [GH-4468]

### BUG FIXES:

* builder/amazon: Fix ssh agent authentication. [GH-4597]

@@ -14,7 +14,7 @@
[report-badge]: https://goreportcard.com/badge/github.com/mitchellh/packer
[report]: https://goreportcard.com/report/github.com/mitchellh/packer

* Website: http://www.packer.io
* Website: https://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)

@@ -27,14 +27,19 @@ comes out of the box with support for the following platforms:

* Amazon EC2 (AMI). Both EBS-backed and instance-store AMIs
* Azure
* CloudStack
* DigitalOcean
* Docker
* Google Compute Engine
* Hyper-V
* 1&1
* OpenStack
* Parallels
* ProfitBricks
* QEMU. Both KVM and Xen images.
* VirtualBox
* Triton (Joyent Public Cloud)
* VMware
* VirtualBox

Support for other platforms can be added via plugins.

@@ -78,6 +78,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        return nil, err
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    // Defaults
    if b.config.ChrootMounts == nil {
        b.config.ChrootMounts = make([][]string, 0)

@@ -75,6 +75,14 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {

    // Validation
    errs := c.Comm.Prepare(ctx)
    if c.SSHKeyPairName != "" {
        if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" {
            errs = append(errs, errors.New("A private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
        } else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth {
            errs = append(errs, errors.New("A private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
        }
    }

    if c.SourceAmi == "" && c.SourceAmiFilter.Empty() {
        errs = append(errs, errors.New("A source_ami or source_ami_filter must be specified"))
    }

@@ -2,7 +2,6 @@ package common

import (
    "fmt"

    "sync"

    "github.com/aws/aws-sdk-go/aws"

@@ -128,7 +127,7 @@ func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string,
    }

    for _, blockDeviceMapping := range describeImageResp.Images[0].BlockDeviceMappings {
        if blockDeviceMapping.Ebs != nil {
        if blockDeviceMapping.Ebs != nil && blockDeviceMapping.Ebs.SnapshotId != nil {
            snapshotIds = append(snapshotIds, *blockDeviceMapping.Ebs.SnapshotId)
        }
    }

@@ -58,6 +58,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        return nil, err
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    // Accumulate any errors
    var errs *packer.MultiError
    errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)

@@ -58,6 +58,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        return nil, err
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    // Accumulate any errors
    var errs *packer.MultiError
    errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)

@@ -77,6 +77,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        return nil, err
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    if b.config.BundleDestination == "" {
        b.config.BundleDestination = "/tmp"
    }

@@ -93,15 +93,20 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
    if c.ImageDescription == "" {
        c.ImageDescription = "Created by Packer"
    }
    // Setting OnHostMaintenance Correct Defaults
    // "MIGRATE" : Possible if Preemptible is false
    // "TERMINATE": Posssible if Preemptible is true
    if c.OnHostMaintenance == "" && c.Preemptible {
        c.OnHostMaintenance = "MIGRATE"
    }

    if c.OnHostMaintenance == "" && !c.Preemptible {
    if c.OnHostMaintenance == "MIGRATE" && c.Preemptible {
        errs = packer.MultiErrorAppend(errs,
            errors.New("on_host_maintenance must be TERMINATE when using preemptible instances."))
    }
    // Setting OnHostMaintenance Correct Defaults
    // "MIGRATE" : Possible and default if Preemptible is false
    // "TERMINATE": Required if Preemptible is true
    if c.Preemptible {
        c.OnHostMaintenance = "TERMINATE"
    } else {
        if c.OnHostMaintenance == "" {
            c.OnHostMaintenance = "MIGRATE"
        }
    }

    // Make sure user sets a valid value for on_host_maintenance option

@@ -64,7 +64,7 @@ func (d *HypervPS4Driver) Verify() error {
        return err
    }

    if err := d.verifyElevatedMode(); err != nil {
    if err := d.verifyHypervPermissions(); err != nil {
        return err
    }

@@ -293,16 +293,28 @@ func (d *HypervPS4Driver) verifyPSHypervModule() error {
    return nil
}

func (d *HypervPS4Driver) verifyElevatedMode() error {
func (d *HypervPS4Driver) verifyHypervPermissions() error {

    log.Printf("Enter method: %s", "verifyElevatedMode")
    log.Printf("Enter method: %s", "verifyHypervPermissions")

    isAdmin, _ := powershell.IsCurrentUserAnAdministrator()
    hypervAdminCmd := "([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole('Hyper-V Administrators')"

    if !isAdmin {
        err := fmt.Errorf("%s", "Please restart your shell in elevated mode")
    var ps powershell.PowerShellCmd
    cmdOut, err := ps.Output(hypervAdminCmd)
    if err != nil {
        return err
    }

    res := strings.TrimSpace(string(cmdOut))

    if res == "False" {
        isAdmin, _ := powershell.IsCurrentUserAnAdministrator()

        if !isAdmin {
            err := fmt.Errorf("%s", "Current user is not a member of 'Hyper-V Administrators' or 'Administrators' group")
            return err
        }
    }

    return nil
}

@@ -2,9 +2,9 @@ package common

import (
    "fmt"

    "github.com/mitchellh/packer/common"
    "github.com/mitchellh/packer/template/interpolate"
    "os"
)

type OutputConfig struct {

@@ -16,13 +16,5 @@ func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig
        c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName)
    }

    var errs []error
    if !pc.PackerForce {
        if _, err := os.Stat(c.OutputDir); err == nil {
            errs = append(errs, fmt.Errorf(
                "Output directory '%s' already exists. It must not exist.", c.OutputDir))
        }
    }

    return errs
    return nil
}

@@ -1,10 +1,11 @@
package common

import (
    "github.com/mitchellh/packer/common"
    "io/ioutil"
    "os"
    "testing"

    "github.com/mitchellh/packer/common"
)

func TestOutputConfigPrepare(t *testing.T) {

@@ -39,7 +40,7 @@ func TestOutputConfigPrepare_exists(t *testing.T) {
        PackerForce: false,
    }
    errs := c.Prepare(testConfigTemplate(t), pc)
    if len(errs) == 0 {
        t.Fatal("should have errors")
    if len(errs) != 0 {
        t.Fatal("should not have errors")
    }
}

@@ -17,16 +17,30 @@ import (
type StepOutputDir struct {
    Force bool
    Path string

    cleanup bool
}

func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packer.Ui)

    if _, err := os.Stat(s.Path); err == nil && s.Force {
    if _, err := os.Stat(s.Path); err == nil {
        if !s.Force {
            err := fmt.Errorf(
                "Output directory exists: %s\n\n"+
                    "Use the force flag to delete it prior to building.",
                s.Path)
            state.Put("error", err)
            return multistep.ActionHalt
        }

        ui.Say("Deleting previous output directory...")
        os.RemoveAll(s.Path)
    }

    // Enable cleanup
    s.cleanup = true

    // Create the directory
    if err := os.MkdirAll(s.Path, 0755); err != nil {
        state.Put("error", err)

@@ -47,6 +61,10 @@ func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction {
}

func (s *StepOutputDir) Cleanup(state multistep.StateBag) {
    if !s.cleanup {
        return
    }

    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

@@ -4,10 +4,11 @@ import (
    "bytes"
    "errors"
    "fmt"
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
    "log"
    "time"

    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
)

// This step shuts down the machine. It first attempts to do so gracefully,

@@ -51,14 +52,6 @@ func (s *StepShutdown) Run(state multistep.StateBag) multistep.StepAction {
            return multistep.ActionHalt
        }

        // Wait for the command to run so we can print std{err,out}
        // We don't care if the command errored, since we'll notice
        // if the vm didn't shut down.
        cmd.Wait()

        log.Printf("Shutdown stdout: %s", stdout.String())
        log.Printf("Shutdown stderr: %s", stderr.String())

        // Wait for the machine to actually shut down
        log.Printf("Waiting max %s for shutdown to complete", s.Timeout)
        shutdownTimer := time.After(s.Timeout)

@@ -70,12 +63,14 @@ func (s *StepShutdown) Run(state multistep.StateBag) multistep.StepAction {

            select {
            case <-shutdownTimer:
                log.Printf("Shutdown stdout: %s", stdout.String())
                log.Printf("Shutdown stderr: %s", stderr.String())
                err := errors.New("Timeout while waiting for machine to shut down.")
                state.Put("error", err)
                ui.Error(err.Error())
                return multistep.ActionHalt
            default:
                time.Sleep(150 * time.Millisecond)
                time.Sleep(500 * time.Millisecond)
            }
        }
    } else {

@@ -75,10 +75,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
            Flavor: b.config.Flavor,
        },
        &StepKeyPair{
            Debug: b.config.PackerDebug,
            DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName),
            KeyPairName: b.config.SSHKeyPairName,
            PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
            Debug: b.config.PackerDebug,
            DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName),
            KeyPairName: b.config.SSHKeyPairName,
            TemporaryKeyPairName: b.config.TemporaryKeyPairName,
            PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
            SSHAgentAuth: b.config.RunConfig.Comm.SSHAgentAuth,
        },
        &StepRunSourceServer{
            Name: b.config.ImageName,

@@ -110,7 +112,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
                computeClient,
                b.config.SSHInterface,
                b.config.SSHIPVersion),
            SSHConfig: SSHConfig(b.config.RunConfig.Comm.SSHUsername,
            SSHConfig: SSHConfig(
                b.config.RunConfig.Comm.SSHAgentAuth,
                b.config.RunConfig.Comm.SSHUsername,
                b.config.RunConfig.Comm.SSHPassword),
        },
        &common.StepProvision{},

@@ -4,6 +4,7 @@ import (
    "errors"
    "fmt"

    "github.com/mitchellh/packer/common/uuid"
    "github.com/mitchellh/packer/helper/communicator"
    "github.com/mitchellh/packer/template/interpolate"
)

@@ -11,10 +12,11 @@ import (
// RunConfig contains configuration for running an instance from a source
// image and details on how to access that launched image.
type RunConfig struct {
    Comm communicator.Config `mapstructure:",squash"`
    SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
    SSHInterface string `mapstructure:"ssh_interface"`
    SSHIPVersion string `mapstructure:"ssh_ip_version"`
    Comm communicator.Config `mapstructure:",squash"`
    SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
    TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
    SSHInterface string `mapstructure:"ssh_interface"`
    SSHIPVersion string `mapstructure:"ssh_ip_version"`

    SourceImage string `mapstructure:"source_image"`
    SourceImageName string `mapstructure:"source_image_name"`

@@ -38,6 +40,15 @@ type RunConfig struct {
}

func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
    // If we are not given an explicit ssh_keypair_name or
    // ssh_private_key_file, then create a temporary one, but only if the
    // temporary_key_pair_name has not been provided and we are not using
    // ssh_password.
    if c.SSHKeyPairName == "" && c.TemporaryKeyPairName == "" &&
        c.Comm.SSHPrivateKey == "" && c.Comm.SSHPassword == "" {

        c.TemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
    }

    if c.UseFloatingIp && c.FloatingIpPool == "" {
        c.FloatingIpPool = "public"

@@ -45,6 +56,15 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {

    // Validation
    errs := c.Comm.Prepare(ctx)

    if c.SSHKeyPairName != "" {
        if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" {
            errs = append(errs, errors.New("A private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
        } else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth {
            errs = append(errs, errors.New("A private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
        }
    }

    if c.SourceImage == "" && c.SourceImageName == "" {
        errs = append(errs, errors.New("Either a source_image or a source_image_name must be specified"))
    } else if len(c.SourceImage) > 0 && len(c.SourceImageName) > 0 {

@@ -4,6 +4,8 @@ import (
    "errors"
    "fmt"
    "log"
    "net"
    "os"
    "time"

    "github.com/gophercloud/gophercloud"

@@ -12,6 +14,7 @@ import (
    "github.com/mitchellh/multistep"
    packerssh "github.com/mitchellh/packer/communicator/ssh"
    "golang.org/x/crypto/ssh"
    "golang.org/x/crypto/ssh/agent"
)

// CommHost looks up the host for the communicator.

@@ -63,8 +66,26 @@ func CommHost(
// SSHConfig returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using a private key
// or a password.
func SSHConfig(username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
    return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
        if useAgent {
            authSock := os.Getenv("SSH_AUTH_SOCK")
            if authSock == "" {
                return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
            }

            sshAgent, err := net.Dial("unix", authSock)
            if err != nil {
                return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
            }

            return &ssh.ClientConfig{
                User: username,
                Auth: []ssh.AuthMethod{
                    ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
                },
            }, nil
        }

        privateKey, hasKey := state.GetOk("privateKey")

@@ -10,21 +10,24 @@ import (

    "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs"
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/common/uuid"
    "github.com/mitchellh/packer/packer"
    "golang.org/x/crypto/ssh"
)

type StepKeyPair struct {
    Debug bool
    DebugKeyPath string
    KeyPairName string
    PrivateKeyFile string
    Debug bool
    SSHAgentAuth bool
    DebugKeyPath string
    TemporaryKeyPairName string
    KeyPairName string
    PrivateKeyFile string

    keyName string
}

func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packer.Ui)

    if s.PrivateKeyFile != "" {
        privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
        if err != nil {

@@ -39,14 +42,25 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionContinue
    }

    config := state.Get("config").(Config)
    ui := state.Get("ui").(packer.Ui)

    if config.Comm.Type == "ssh" && config.Comm.SSHPassword != "" {
        ui.Say("Not creating temporary keypair when using password.")
    if s.SSHAgentAuth && s.KeyPairName == "" {
        ui.Say("Using SSH Agent with key pair in Source image")
        return multistep.ActionContinue
    }

    if s.SSHAgentAuth && s.KeyPairName != "" {
        ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.KeyPairName))
        state.Put("keyPair", s.KeyPairName)
        return multistep.ActionContinue
    }

    if s.TemporaryKeyPairName == "" {
        ui.Say("Not using temporary keypair")
        state.Put("keyPair", "")
        return multistep.ActionContinue
    }

    config := state.Get("config").(Config)

    // We need the v2 compute client
    computeClient, err := config.computeV2Client()
    if err != nil {

@@ -55,10 +69,9 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionHalt
    }

    keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID())
    ui.Say(fmt.Sprintf("Creating temporary keypair: %s ...", keyName))
    ui.Say(fmt.Sprintf("Creating temporary keypair: %s ...", s.TemporaryKeyPairName))
    keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{
        Name: keyName,
        Name: s.TemporaryKeyPairName,
    }).Extract()
    if err != nil {
        state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))

@@ -70,7 +83,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionHalt
    }

    ui.Say(fmt.Sprintf("Created temporary keypair: %s", keyName))
    ui.Say(fmt.Sprintf("Created temporary keypair: %s", s.TemporaryKeyPairName))

    keypair.PrivateKey = berToDer(keypair.PrivateKey, ui)

@@ -101,10 +114,10 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
    }

    // Set the keyname so we know to delete it later
    s.keyName = keyName
    s.keyName = s.TemporaryKeyPairName

    // Set some state data for use in future steps
    state.Put("keyPair", keyName)
    state.Put("keyPair", s.keyName)
    state.Put("privateKey", keypair.PrivateKey)

    return multistep.ActionContinue

@@ -156,11 +169,11 @@ func berToDer(ber string, ui packer.Ui) string {
func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
    // If we used an SSH private key file, do not go about deleting
    // keypairs
    if s.PrivateKeyFile != "" {
    if s.PrivateKeyFile != "" || (s.KeyPairName == "" && s.keyName == "") {
        return
    }
    // If no key name is set, then we never created it, so just return
    if s.keyName == "" {
    if s.TemporaryKeyPairName == "" {
        return
    }

@@ -171,14 +184,14 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
    computeClient, err := config.computeV2Client()
    if err != nil {
        ui.Error(fmt.Sprintf(
            "Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
            "Error cleaning up keypair. Please delete the key manually: %s", s.TemporaryKeyPairName))
        return
    }

    ui.Say(fmt.Sprintf("Deleting temporary keypair: %s ...", s.keyName))
    ui.Say(fmt.Sprintf("Deleting temporary keypair: %s ...", s.TemporaryKeyPairName))
    err = keypairs.Delete(computeClient, s.keyName).ExtractErr()
    if err != nil {
        ui.Error(fmt.Sprintf(
            "Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
            "Error cleaning up keypair. Please delete the key manually: %s", s.TemporaryKeyPairName))
    }
}

@@ -37,6 +37,7 @@ type Config struct {

    BootCommand []string `mapstructure:"boot_command"`
    DiskSize uint `mapstructure:"disk_size"`
    DiskType string `mapstructure:"disk_type"`
    GuestOSType string `mapstructure:"guest_os_type"`
    HardDriveInterface string `mapstructure:"hard_drive_interface"`
    HostInterfaces []string `mapstructure:"host_interfaces"`

@@ -87,6 +88,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        b.config.DiskSize = 40000
    }

    if b.config.DiskType == "" {
        b.config.DiskType = "expand"
    }

    if b.config.HardDriveInterface == "" {
        b.config.HardDriveInterface = "sata"
    }

@@ -104,6 +109,17 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
        b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName)
    }

    if b.config.DiskType != "expand" && b.config.DiskType != "plain" {
        errs = packer.MultiErrorAppend(
            errs, errors.New("disk_type can only be expand, or plain"))
    }

    if b.config.DiskType == "plain" && !b.config.SkipCompaction {
        b.config.SkipCompaction = true
        warnings = append(warnings,
            "'skip_compaction' is enforced to be true for plain disks.")
    }

    if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" {
        errs = packer.MultiErrorAppend(
            errs, errors.New("hard_drive_interface can only be ide, sata, or scsi"))

@@ -130,6 +130,61 @@ func TestBuilderPrepare_DiskSize(t *testing.T) {
    }
}

func TestBuilderPrepare_DiskType(t *testing.T) {
    var b Builder
    config := testConfig()

    // Test a default disk_type
    delete(config, "disk_type")
    warns, err := b.Prepare(config)
    if len(warns) > 0 {
        t.Fatalf("bad: %#v", warns)
    }
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    if b.config.DiskType != "expand" {
        t.Fatalf("bad: %s", b.config.DiskType)
    }

    // Test with a bad
    config["disk_type"] = "fake"
    b = Builder{}
    warns, err = b.Prepare(config)
    if len(warns) > 0 {
        t.Fatalf("bad: %#v", warns)
    }
    if err == nil {
        t.Fatal("should have error")
    }

    // Test with plain disk with wrong setting for compaction
    config["disk_type"] = "plain"
    config["skip_compaction"] = false
    b = Builder{}
    warns, err = b.Prepare(config)
    if len(warns) == 0 {
        t.Fatalf("should have warning")
    }
    if err != nil {
        t.Fatalf("should not have error: %s", err)
    }

    // Test with plain disk with correct setting for compaction
    config["disk_type"] = "plain"
    config["skip_compaction"] = true
    b = Builder{}
    warns, err = b.Prepare(config)
    if len(warns) > 0 {
        t.Fatalf("bad: %#v", warns)
    }
    if err != nil {
        t.Fatalf("should not have error: %s", err)
    }
}

func TestBuilderPrepare_HardDriveInterface(t *testing.T) {
    var b Builder
    config := testConfig()

@@ -22,6 +22,7 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction {
    command := []string{
        "set", vmName,
        "--device-add", "hdd",
        "--type", config.DiskType,
        "--size", strconv.FormatUint(uint64(config.DiskSize), 10),
        "--iface", config.HardDriveInterface,
    }

@@ -5,12 +5,13 @@ import (
)

type VBoxVersionConfig struct {
    VBoxVersionFile string `mapstructure:"virtualbox_version_file"`
    VBoxVersionFile *string `mapstructure:"virtualbox_version_file"`
}

func (c *VBoxVersionConfig) Prepare(ctx *interpolate.Context) []error {
    if c.VBoxVersionFile == "" {
        c.VBoxVersionFile = ".vbox_version"
    if c.VBoxVersionFile == nil {
        default_file := ".vbox_version"
        c.VBoxVersionFile = &default_file
    }

    return nil

@@ -15,19 +15,50 @@ func TestVBoxVersionConfigPrepare_BootWait(t *testing.T) {
        t.Fatalf("should not have error: %s", errs)
    }

    if c.VBoxVersionFile != ".vbox_version" {
        t.Fatalf("bad value: %s", c.VBoxVersionFile)
    if *c.VBoxVersionFile != ".vbox_version" {
        t.Fatalf("bad value: %s", *c.VBoxVersionFile)
    }

    // Test with a good one
    c = new(VBoxVersionConfig)
    c.VBoxVersionFile = "foo"
    filename := "foo"
    c.VBoxVersionFile = &filename
    errs = c.Prepare(testConfigTemplate(t))
    if len(errs) > 0 {
        t.Fatalf("should not have error: %s", errs)
    }

    if c.VBoxVersionFile != "foo" {
        t.Fatalf("bad value: %s", c.VBoxVersionFile)
    if *c.VBoxVersionFile != "foo" {
        t.Fatalf("bad value: %s", *c.VBoxVersionFile)
    }
}

func TestVBoxVersionConfigPrepare_empty(t *testing.T) {
    var c *VBoxVersionConfig
    var errs []error

    // Test with nil value
    c = new(VBoxVersionConfig)
    c.VBoxVersionFile = nil
    errs = c.Prepare(testConfigTemplate(t))
    if len(errs) > 0 {
        t.Fatalf("should not have error: %s", errs)
    }

    if *c.VBoxVersionFile != ".vbox_version" {
        t.Fatalf("bad value: %s", *c.VBoxVersionFile)
    }

    // Test with empty name
    c = new(VBoxVersionConfig)
    filename := ""
    c.VBoxVersionFile = &filename
    errs = c.Prepare(testConfigTemplate(t))
    if len(errs) > 0 {
        t.Fatalf("should not have error: %s", errs)
    }

    if *c.VBoxVersionFile != "" {
        t.Fatalf("bad value: %s", *c.VBoxVersionFile)
    }
}

@@ -246,7 +246,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
            WinRMPort: vboxcommon.SSHPort,
        },
        &vboxcommon.StepUploadVersion{
            Path: b.config.VBoxVersionFile,
            Path: *b.config.VBoxVersionFile,
        },
        &vboxcommon.StepUploadGuestAdditions{
            GuestAdditionsMode: b.config.GuestAdditionsMode,

@@ -119,7 +119,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
            WinRMPort: vboxcommon.SSHPort,
        },
        &vboxcommon.StepUploadVersion{
            Path: b.config.VBoxVersionFile,
            Path: *b.config.VBoxVersionFile,
        },
        &vboxcommon.StepUploadGuestAdditions{
            GuestAdditionsMode: b.config.GuestAdditionsMode,

@@ -40,7 +40,7 @@ func (c *VersionCommand) Run(args []string) int {
    var versionString bytes.Buffer
    fmt.Fprintf(&versionString, "Packer v%s", c.Version)
    if c.VersionPrerelease != "" {
        fmt.Fprintf(&versionString, ".%s", c.VersionPrerelease)
        fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)

        if c.Revision != "" {
            fmt.Fprintf(&versionString, " (%s)", c.Revision)

@@ -61,6 +61,10 @@ type abortStep struct {
    ui packer.Ui
}

func (s abortStep) InnerStepName() string {
    return typeName(s.step)
}

func (s abortStep) Run(state multistep.StateBag) multistep.StepAction {
    return s.step.Run(state)
}

@@ -82,6 +86,10 @@ type askStep struct {
    ui packer.Ui
}

func (s askStep) InnerStepName() string {
    return typeName(s.step)
}

func (s askStep) Run(state multistep.StateBag) (action multistep.StepAction) {
    for {
        action = s.step.Run(state)

@@ -239,9 +239,9 @@ func (c *comm) newSession() (session *ssh.Session, err error) {
    }

    if c.client == nil {
        err = errors.New("client not available")
        return nil, errors.New("client not available")
    } else {
        session, err = c.client.NewSession()
        return c.client.NewSession()
    }
}

@@ -5,6 +5,8 @@ import (
    "io"
    "log"
    "os"
    "path/filepath"
    "strings"
    "sync"

    "github.com/masterzen/winrm"

@@ -129,6 +131,9 @@ func (c *Communicator) Upload(path string, input io.Reader, _ *os.FileInfo) erro

// UploadDir implementation of communicator.Communicator interface
func (c *Communicator) UploadDir(dst string, src string, exclude []string) error {
    if !strings.HasSuffix(src, "/") {
        dst = fmt.Sprintf("%s\\%s", dst, filepath.Base(src))
    }
    log.Printf("Uploading dir '%s' to '%s'", src, dst)
    wcp, err := c.newCopyClient()
    if err != nil {

@@ -144,7 +144,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
func (p *Provisioner) getVersion() error {
    out, err := exec.Command(p.config.Command, "--version").Output()
    if err != nil {
        return err
        return fmt.Errorf(
            "Error running \"%s --version\": %s", p.config.Command, err.Error())
    }

    versionRe := regexp.MustCompile(`\w (\d+\.\d+[.\d+]*)`)

@@ -8,6 +8,7 @@ import (
    "io/ioutil"
    "os"
    "path"
    "strings"
    "testing"

    "github.com/mitchellh/packer/packer"

@@ -258,6 +259,18 @@ func TestAnsibleGetVersion(t *testing.T) {
    }
}

func TestAnsibleGetVersionError(t *testing.T) {
    var p Provisioner
    p.config.Command = "./test-fixtures/exit1"
    err := p.getVersion()
    if err == nil {
        t.Fatal("Should return error")
    }
    if !strings.Contains(err.Error(), "./test-fixtures/exit1 --version") {
        t.Fatal("Error message should include command name")
    }
}

func TestAnsibleLongMessages(t *testing.T) {
    if os.Getenv("PACKER_ACC") == "" {
        t.Skip("This test is only run with PACKER_ACC=1 and it requires Ansible to be installed")

@@ -0,0 +1,3 @@
#!/bin/sh

exit 1

@@ -17,6 +17,13 @@ const (
    DebugLocationBeforeCleanup
)

// StepWrapper is an interface that wrapped steps can implement to expose their
// inner step names to the debug runner.
type StepWrapper interface {
    // InnerStepName should return the human readable name of the wrapped step.
    InnerStepName() string
}

// DebugPauseFn is the type signature for the function that is called
// whenever the DebugRunner pauses. It allows the caller time to
// inspect the state of the multi-step sequence at a given step.

@@ -56,8 +63,14 @@ func (r *DebugRunner) Run(state StateBag) {
    steps := make([]Step, len(r.Steps)*2)
    for i, step := range r.Steps {
        steps[i*2] = step
        name := ""
        if wrapped, ok := step.(StepWrapper); ok {
            name = wrapped.InnerStepName()
        } else {
            name = reflect.Indirect(reflect.ValueOf(step)).Type().Name()
        }
        steps[(i*2)+1] = &debugStepPause{
            reflect.Indirect(reflect.ValueOf(step)).Type().Name(),
            name,
            pauseFn,
        }
    }

@@ -633,9 +633,10 @@
        "revision": "281073eb9eb092240d33ef253c404f1cca550309"
    },
    {
        "checksumSHA1": "9Vh2o3Vs6HzI8P04ks4kvIpegco=",
        "checksumSHA1": "5x1RX5m8SCkCRLyLL8wBc0qJpV8=",
        "path": "github.com/mitchellh/multistep",
        "revision": "162146fc57112954184d90266f4733e900ed05a5"
        "revision": "391576a156a54cfbb4cf5d5eda40cf6ffa3e3a4d",
        "revisionTime": "2017-03-16T18:53:39Z"
    },
    {
        "checksumSHA1": "VBo7ciCNRr7wNVFmBTW8sm4PQ14=",

@@ -9,18 +9,18 @@ import (
var GitCommit string

// The main version number that is being run at the moment.
const Version = "1.0"
const Version = "1.1.0"

// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
const VersionPrerelease = "rc1"
const VersionPrerelease = "dev"

func FormattedVersion() string {
    var versionString bytes.Buffer
    fmt.Fprintf(&versionString, "%s", Version)
    if VersionPrerelease != "" {
        fmt.Fprintf(&versionString, ".%s", VersionPrerelease)
        fmt.Fprintf(&versionString, "-%s", VersionPrerelease)

        if GitCommit != "" {
            fmt.Fprintf(&versionString, " (%s)", GitCommit)

@@ -1,6 +1,3 @@
source "https://rubygems.org"

gem "middleman-hashicorp",
  git: "https://github.com/hashicorp/middleman-hashicorp.git"

gem "htmlbeautifier"
gem "middleman-hashicorp", "0.3.13"

@@ -1,30 +1,17 @@
GIT
  remote: https://github.com/hashicorp/middleman-hashicorp.git
  revision: 462267352881543bbc5d2606f1ca17a6165ac8ec
  specs:
    middleman-hashicorp (0.3.5)
      bootstrap-sass (~> 3.3)
      builder (~> 3.2)
      middleman (~> 3.4)
      middleman-livereload (~> 3.4)
      middleman-syntax (~> 3.0)
      redcarpet (~> 3.3)

GEM
  remote: https://rubygems.org/
  specs:
    activesupport (4.2.7.1)
    activesupport (4.2.8)
      i18n (~> 0.7)
      json (~> 1.7, >= 1.7.7)
      minitest (~> 5.1)
      thread_safe (~> 0.3, >= 0.3.4)
      tzinfo (~> 1.1)
    autoprefixer-rails (6.5.3)
    autoprefixer-rails (6.7.6)
      execjs
    bootstrap-sass (3.3.7)
      autoprefixer-rails (>= 5.2.1)
      sass (>= 3.3.4)
    builder (3.2.2)
    builder (3.2.3)
    capybara (2.4.4)
      mime-types (>= 1.16)
      nokogiri (>= 1.3.3)

@@ -35,7 +22,7 @@ GEM
    coffee-script (2.4.1)
      coffee-script-source
      execjs
    coffee-script-source (1.10.0)
    coffee-script-source (1.12.2)
    compass (1.0.3)
      chunky_png (~> 1.2)
      compass-core (~> 1.0.2)

@@ -52,19 +39,18 @@ GEM
      eventmachine (>= 0.12.9)
      http_parser.rb (~> 0.6.0)
    erubis (2.7.0)
    eventmachine (1.2.0.1)
    eventmachine (1.2.3)
    execjs (2.7.0)
    ffi (1.9.14)
    ffi (1.9.18)
    haml (4.0.7)
      tilt
    hike (1.2.3)
    hooks (0.4.1)
      uber (~> 0.0.14)
    htmlbeautifier (1.2.0)
    http_parser.rb (0.6.0)
    i18n (0.7.0)
    json (1.8.3)
    kramdown (1.12.0)
    json (2.0.3)
    kramdown (1.13.2)
    listen (3.0.8)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)

@@ -91,6 +77,14 @@ GEM
      rack (>= 1.4.5, < 2.0)
      thor (>= 0.15.2, < 2.0)
      tilt (~> 1.4.1, < 2.0)
    middleman-hashicorp (0.3.13)
      bootstrap-sass (~> 3.3)
      builder (~> 3.2)
      middleman (~> 3.4)
      middleman-livereload (~> 3.4)
      middleman-syntax (~> 3.0)
      redcarpet (~> 3.3)
      turbolinks (~> 5.0)
    middleman-livereload (3.4.6)
      em-websocket (~> 0.5.1)
      middleman-core (>= 3.3)

@@ -107,9 +101,9 @@ GEM
      mime-types-data (~> 3.2015)
    mime-types-data (3.2016.0521)
    mini_portile2 (2.1.0)
    minitest (5.9.1)
    minitest (5.10.1)
    multi_json (1.12.1)
    nokogiri (1.6.8.1)
    nokogiri (1.7.0.1)
      mini_portile2 (~> 2.1.0)
    padrino-helpers (0.12.8.1)
      i18n (~> 0.6, >= 0.6.7)

@@ -123,11 +117,11 @@ GEM
    rack-test (0.6.3)
      rack (>= 1.0)
    rb-fsevent (0.9.8)
    rb-inotify (0.9.7)
    rb-inotify (0.9.8)
      ffi (>= 0.5.0)
    redcarpet (3.3.4)
    rouge (2.0.6)
    sass (3.4.22)
    redcarpet (3.4.0)
    rouge (2.0.7)
    sass (3.4.23)
    sprockets (2.12.4)
      hike (~> 1.2)
      multi_json (~> 1.0)

@@ -138,9 +132,12 @@ GEM
    sprockets-sass (1.3.1)
      sprockets (~> 2.0)
      tilt (~> 1.1)
    thor (0.19.1)
    thread_safe (0.3.5)
    thor (0.19.4)
    thread_safe (0.3.6)
    tilt (1.4.1)
    turbolinks (5.0.1)
      turbolinks-source (~> 5)
    turbolinks-source (5.0.0)
    tzinfo (1.2.2)
      thread_safe (~> 0.1)
    uber (0.0.15)

@@ -154,8 +151,7 @@ PLATFORMS
  ruby

DEPENDENCIES
  htmlbeautifier
  middleman-hashicorp!
  middleman-hashicorp (= 0.3.13)

BUNDLED WITH
   1.13.6
   1.14.6

@@ -0,0 +1,10 @@
# Proprietary License

This license is temporary while a more official one is drafted. However,
this should make it clear:

The text contents of this website are MPL 2.0 licensed.

The design contents of this website are proprietary and may not be reproduced
or reused in any way other than to run the website locally. The license for
the design is owned solely by HashiCorp, Inc.

@@ -1,22 +1,14 @@
all: build
VERSION?="0.3.13"

init:
    bundle
website:
    @echo "==> Starting website in Docker..."
    @docker run \
        --interactive \
        --rm \
        --tty \
        --publish "4567:4567" \
        --publish "35729:35729" \
        --volume "$(shell pwd):/website" \
        hashicorp/middleman-hashicorp:${VERSION}

docker-dev:
    docker run -it --expose 4567 -p 4567:4567 -v "$(PWD)":/usr/src/app -w /usr/src/app ruby:2.3.1 \
        bash -c "apt-get update && apt-get -qy install curl git libgmp3-dev nodejs && \
        gem install bundler && bundle install && make dev"

dev: init
    PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman server

build: init
    PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman build

format:
    bundle exec htmlbeautifier -t 2 source/*.erb
    bundle exec htmlbeautifier -t 2 source/layouts/*.erb
    @pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content"
    pandoc -v > /dev/null && find . -iname "*.html.md" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=4 --atx-headers -s --columns=80 {} > {}.new"\; || true
    pandoc -v > /dev/null && find . -iname "*.html.md" | xargs -I{} bash -c "mv {}.new {}"\; || true
.PHONY: website

@@ -1,33 +1,21 @@
# Packer Website

This subdirectory contains the entire source for the [Packer website](http://www.packer.io).
This is a [Middleman](http://middlemanapp.com) project, which builds a static
site from these source files.
This subdirectory contains the entire source for the [Packer Website][packer].
This is a [Middleman][middleman] project, which builds a static site from these
source files.

## Contributions Welcome!

If you find a typo or you feel like you can improve the HTML, CSS, or
JavaScript, we welcome contributions. Feel free to open issues or pull
requests like any normal GitHub project, and we'll merge it in.
JavaScript, we welcome contributions. Feel free to open issues or pull requests
like any normal GitHub project, and we'll merge it in.

## Running the Site Locally

Running the site locally is simple. Clone this repo and run the following
commands:
Running the site locally is simple. Clone this repo and run `make website`.

```
make dev
```
Then open up `http://localhost:4567`. Note that some URLs you may need to append
".html" to make them work (in the navigation).

Then open up `localhost:4567`. Note that some URLs you may need to append
".html" to make them work (in the navigation and such).

## Keeping Tidy

To keep the source code nicely formatted, there is a `make format` target. This
runs `htmlbeautify` and `pandoc` to reformat the source code so it's nicely formatted.

    make format

Note that you will need to install pandoc yourself. `make format` will skip it
if you don't have it installed.
[middleman]: https://www.middlemanapp.com
[packer]: https://www.packer.io

@@ -1,27 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

$script = <<SCRIPT
sudo apt-get -y update

# RVM/Ruby
sudo apt-get -qy install curl git libgmp3-dev nodejs
gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3
# Install rvm and the latest version of ruby
curl -sSL https://get.rvm.io | bash -s stable
. ~/.bashrc
. ~/.bash_profile
rvm install ruby-2.3.1
gem install bundler

# Middleman deps
cd /vagrant
make dev
SCRIPT

Vagrant.configure(2) do |config|
  config.vm.box = "bento/ubuntu-14.04"
  config.vm.network "private_network", ip: "33.33.30.10"
  config.vm.provision "shell", inline: $script, privileged: false
  config.vm.synced_folder ".", "/vagrant", type: "rsync"
end

@@ -1,38 +0,0 @@
require "rack"
require "rack/contrib/not_found"
require "rack/contrib/response_headers"
require "rack/contrib/static_cache"
require "rack/contrib/try_static"
require "rack/protection"

# Protect against various bad things
use Rack::Protection::JsonCsrf
use Rack::Protection::RemoteReferrer
use Rack::Protection::HttpOrigin
use Rack::Protection::EscapedParams
use Rack::Protection::XSSHeader
use Rack::Protection::FrameOptions
use Rack::Protection::PathTraversal
use Rack::Protection::IPSpoofing

# Properly compress the output if the client can handle it.
use Rack::Deflater

# Set the "forever expire" cache headers for these static assets. Since
# we hash the contents of the assets to determine filenames, this is safe
# to do.
use Rack::StaticCache,
  :root => "build",
  :urls => ["/assets", "/javascripts"],
  :duration => 2,
  :versioning => false

# Try to find a static file that matches our request, since Middleman
# statically generates everything.
use Rack::TryStatic,
  :root => "build",
  :urls => ["/"],
  :try => [".html", "index.html", "/index.html"]

# 404 if we reached this point. Sad times.
run Rack::NotFound.new(File.expand_path("../build/404.html", __FILE__))

@@ -8,15 +8,16 @@
  "builders": [
    {
      "type": "docker",
      "image": "ruby:2.3-slim",
      "commit": "true"
      "image": "hashicorp/middleman-hashicorp:0.3.13",
      "discard": "true",
      "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
    }
  ],
  "provisioners": [
    {
      "type": "file",
      "source": ".",
      "destination": "/app"
      "destination": "/website"
    },
    {
      "type": "shell",

@@ -27,16 +28,9 @@
        "FASTLY_API_KEY={{ user `fastly_api_key` }}"
      ],
      "inline": [
        "apt-get -qq update",
        "apt-get -yqq install build-essential curl git libffi-dev wget nodejs",
        "apt-get -yqq install python-pip",
        "pip install s3cmd",
        "cd /app",

        "bundle check || bundle install --jobs 7",
        "bundle check || bundle install",
        "bundle exec middleman build",

        "/bin/bash ./scripts/deploy.sh"
        "/bin/sh ./scripts/deploy.sh"
      ]
    }
  ]

@@ -1,7 +1,10 @@
//= require jquery
//= require bootstrap

//= require lib/Base

//= require hashicorp/mega-nav

//= require docs
//= require app/Sidebar
////= require app/Init

@@ -95,10 +95,6 @@ p {
      color: darken($green, 50%);
    }
  }

  img {
    width: 100%;
  }
}

a {

@@ -3,6 +3,9 @@

@import url("//fonts.googleapis.com/css?family=Inconsolata|Open+Sans:300,400,600");

// Mega Nav
@import 'hashicorp/mega-nav';

@import "_helpers";
@import "_reset";

@@ -184,7 +184,7 @@ each category, the available configuration keys are alphabetized.
    volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic
    volumes

-   `root_device_name` (string) - The root device name. For example, `xvda`.
-   `root_device_name` (string) - The root device name. For example, `xvda`.

-   `mount_path` (string) - The path where the volume will be mounted. This is
    where the chroot environment will be. This defaults to

@@ -375,10 +375,10 @@ provisioning commands to install the os and bootloader.
``` {.javascript}
{
  "type": "amazon-chroot",
  "ami_name": "packer-from-scratch {{timestamp}}"
  "ami_name": "packer-from-scratch {{timestamp}}",
  "from_scratch": true,
  "ami_virtualization_type": "hvm",
  "device_setup_commands": [
  "pre_mount_commands": [
    "parted {{.Device}} mklabel msdos mkpart primary 1M 100% set 1 boot on print",
    "mkfs.ext4 {{.Device}}1"
  ],

@@ -49,15 +49,15 @@ builder.
-   `source_ami` (string) - The initial AMI used as a base for the newly
    created machine. `source_ami_filter` may be used instead to populate this
    automatically.

-   `ami_root_device` (block device mapping) - A block device mapping describing
    the root device of the AMI. This looks like the mappings in `ami_block_device_mapping`,
    except with an additional field:

    -   `source_device_name` (string) - The device name of the block device on the
        source instance to be used as the root device for the AMI. This must correspond
        to a block device in `launch_block_device_mapping`.

-   `source_device_name` (string) - The device name of the block device on the
    source instance to be used as the root device for the AMI. This must correspond
    to a block device in `launch_block_device_mapping`.

### Optional:

-   `ami_block_device_mappings` (array of block device mappings) - Add one or

@@ -28,7 +28,7 @@ Packer supports the following builders at the moment:
    newcomers**. However, it is also the fastest way to build an EBS-backed AMI
    since no new EC2 instance needs to be launched.

-   [amazon-ebssurrogate](/docs/builders/amazone-ebssurrogate.html) - Create EBS
-   [amazon-ebssurrogate](/docs/builders/amazon-ebssurrogate.html) - Create EBS
    -backed AMIs from scratch. Works similarly to the `chroot` builder but does
    not require running in AWS. This is an **advanced builder and should not be
    used by newcomers**.

@@ -88,8 +88,9 @@ access tokens:
{
  "type": "digitalocean",
  "api_token": "YOUR API KEY",
  "image": "ubuntu-12-04-x64",
  "region": "nyc2",
  "size": "512mb"
  "image": "ubuntu-14-04-x64",
  "region": "nyc3",
  "size": "512mb",
  "ssh_username": "root"
}
```

@@ -145,6 +145,9 @@ builder.
-   `address` (string) - The name of a pre-allocated static external IP address.
    Note, must be the name and not the actual IP address.

-   `disk_name` (string) - The name of the disk, if unset the instance name will be
    used.

-   `disk_size` (integer) - The size of the disk in GB. This defaults to `10`,
    which is 10GB.

@@ -177,15 +180,14 @@ builder.
-   `omit_external_ip` (boolean) - If true, the instance will not have an external IP.
    `use_internal_ip` must be true if this property is true.

-   `preemptible` (boolean) - If true, launch a preemptible instance.

-   `on_host_maintenance` (string) - Sets Host Maintenance Option. Valid
    choices are `MIGRATE` and `TERMINATE`. Please see [GCE Instance Scheduling
    Options](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options),
    as not all machine_types support `MIGRATE` (i.e. machines with GPUs). The
    default value depends on preemptibility.
    - when preemptible == true, defaults to `TERMINATE`
    - when preemptible == false, defaults to `MIGRATE`
    as not all machine_types support `MIGRATE` (i.e. machines with GPUs).
    If preemptible is true this can only be `TERMINATE`. If preemptible
    is false, it defaults to `MIGRATE`

-   `preemptible` (boolean) - If true, launch a preemptible instance.

-   `region` (string) - The region in which to launch the instance. Defaults to
    the region hosting the specified `zone`.

@@ -144,8 +144,21 @@ builder.
-   `ssh_keypair_name` (string) - If specified, this is the key that will be
    used for SSH with the machine. By default, this is blank, and Packer will
    generate a temporary keypair.
    [`ssh_password`](/docs/templates/communicator.html#ssh_password) is used.
    [`ssh_private_key_file`](/docs/templates/communicator.html#ssh_private_key_file)
    must be specified with this.
    or `ssh_agent_auth` must be specified when `ssh_keypair_name` is utilized.

-   `ssh_agent_auth` (boolean) - If true, the local SSH agent will be used to
    authenticate connections to the source instance. No temporary keypair will
    be created, and the values of `ssh_password` and `ssh_private_key_file` will
    be ignored. To use this option with a key pair already configured in the source
    image, leave the `ssh_keypair_name` blank. To associate an existing key pair
    with the source instance, set the `ssh_keypair_name` field to the name
    of the key pair.

-   `temporary_key_pair_name` (string) - The name of the temporary key pair
    to generate. By default, Packer generates a name that looks like
    `packer_<UUID>`, where \<UUID\> is a 36 character unique identifier.

-   `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
    instance into. Some OpenStack installations require this. If not specified,

@@ -105,6 +105,14 @@ builder.
-   `disk_size` (integer) - The size, in megabytes, of the hard disk to create
    for the VM. By default, this is 40000 (about 40 GB).

-   `disk_type` (string) - The type for image file based virtual disk drives,
    defaults to `expand`. Valid options are `expand` (expanding disk) that the
    image file is small initially and grows in size as you add data to it, and
    `plain` (plain disk) that the image file has a fixed size from the moment it
    is created (i.e the space is allocated for the full drive). Plain disks
    perform faster than expanding disks. `skip_compaction` will be set to true
    automatically for plain disks.

-   `floppy_files` (array of strings) - A list of files to place onto a floppy
    disk that is attached when the VM is booted. This is most useful for
    unattended Windows installs, which look for an `Autounattend.xml` file on

@@ -218,9 +226,10 @@ builder.
    "5m", or five minutes.

-   `skip_compaction` (boolean) - Virtual disk image is compacted at the end of
    the build process using `prl_disk_tool` utility. In certain rare cases, this
    might corrupt the resulting disk image. If you find this to be the case,
    you can disable compaction using this configuration value.
    the build process using `prl_disk_tool` utility (except for the case that
    `disk_type` is set to `plain`). In certain rare cases, this might corrupt
    the resulting disk image. If you find this to be the case, you can disable
    compaction using this configuration value.

-   `vm_name` (string) - This is the name of the PVM directory for the new
    virtual machine, without the file extension. By default this is

@@ -273,7 +282,7 @@ proper key:

-   `<leftAltOn>` `<rightAltOn>` - Simulates pressing and holding the alt key.

-   `<leftCtrlOn>` `<rightCtrlOn>` - Simulates pressing and holding the ctrl key.
-   `<leftCtrlOn>` `<rightCtrlOn>` - Simulates pressing and holding the ctrl key.

-   `<leftShiftOn>` `<rightShiftOn>` - Simulates pressing and holding the shift key.

@@ -287,9 +296,9 @@ proper key:
    sending any additional keys. This is useful if you have to generally wait
    for the UI to update before typing more.

When using modifier keys `ctrl`, `alt`, `shift` ensure that you release them,
otherwise they will be held down until the machine reboots. Use lowercase
characters as well inside modifiers.
When using modifier keys `ctrl`, `alt`, `shift` ensure that you release them,
otherwise they will be held down until the machine reboots. Use lowercase
characters as well inside modifiers.

For example: to simulate ctrl+c use `<leftCtrlOn>c<leftCtrlOff>`.

|
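
A brief, assumed `boot_command` fragment using the press-and-hold syntax
described above; the key sequence itself is only illustrative:

```json
{
  "boot_command": [
    "<leftAltOn><f2><leftAltOff><wait5>",
    "<leftCtrlOn>c<leftCtrlOff>"
  ]
}
```
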
@ -172,13 +172,18 @@ Linux server and have not enabled X11 forwarding (`ssh -X`).

  is attached as the first floppy device. Currently, no support exists for
  creating sub-directories on the floppy. Wildcard characters (\*, ?,
  and \[\]) are allowed. Directory names are also allowed, which will add all
  the files found in the directory to the floppy.
  the files found in the directory to the floppy. The summary size of the
  listed files must not exceed 1.44 MB. The supported ways to move large
  files into the OS are using `http_directory` or [the file provisioner](
  https://www.packer.io/docs/provisioners/file.html).

- `floppy_dirs` (array of strings) - A list of directories to place onto the
  floppy disk recursively. This is similar to the `floppy_files` option except
  that the directory structure is preserved. This is useful for when your
  floppy disk includes drivers or if you just want to organize its contents as
  a hierarchy. Wildcard characters (\*, ?, and \[\]) are allowed. The maximum
  summary size of all files in the listed directories is the same as for
  `floppy_files`. An example appears after this list.

- `format` (string) - Either "qcow2" or "raw", this specifies the output
  format of the virtual machine image. This defaults to `qcow2`.
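
A sketch of the floppy options in a `qemu` builder block; the ISO fields and
the example file and directory paths are assumptions:

```json
{
  "type": "qemu",
  "iso_url": "http://example.com/install.iso",
  "iso_checksum_type": "sha256",
  "iso_checksum": "<checksum>",
  "format": "qcow2",
  "floppy_files": ["answer_files/Autounattend.xml"],
  "floppy_dirs": ["drivers"]
}
```
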
@ -417,7 +422,10 @@ command, they will be replaced by the proper key:

- `<waitXX>` - Add a user-defined time.Duration pause before sending any
  additional keys. For example `<wait10m>` or `<wait1m20s>`.

When using modifier keys `ctrl`, `alt`, `shift` ensure that you release them;
otherwise they will be held down until the machine reboots. Use lowercase
characters as well inside modifiers. For example: to simulate ctrl+c use
`<leftCtrlOn>c<leftCtrlOff>`.
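
A small, assumed fragment combining a timed wait with a released modifier
sequence, as described above:

```json
{
  "boot_command": [
    "<wait1m20s>",
    "<leftCtrlOn>c<leftCtrlOff><enter>"
  ]
}
```
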
In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
@ -304,7 +304,8 @@ builder.

  upload a file that contains the VirtualBox version that was used to create
  the machine. This information can be useful for provisioning. By default
  this is ".vbox\_version", which will generally be uploaded into the
  home directory.
  home directory. Set to an empty string to skip uploading this file, which
  can be useful when using the `none` communicator. A sketch follows below.

- `vm_name` (string) - This is the name of the OVF file for the new virtual
  machine, without the file extension. By default this is "packer-BUILDNAME",
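
Assuming the option described above is the builder's `virtualbox_version_file`
setting (the option name itself is outside this hunk), a build that uses the
`none` communicator might skip the upload like this:

```json
{
  "type": "virtualbox-iso",
  "communicator": "none",
  "virtualbox_version_file": ""
}
```
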
@ -266,7 +266,8 @@ builder.

  upload a file that contains the VirtualBox version that was used to create
  the machine. This information can be useful for provisioning. By default
  this is ".vbox\_version", which will generally be uploaded into the
  home directory.
  home directory. Set to an empty string to skip uploading this file, which
  can be useful when using the `none` communicator.

- `vm_name` (string) - This is the name of the virtual machine when it is
  imported as well as the name of the OVF file when the virtual machine
@ -46,13 +46,6 @@ The configuration allows you to specify and access the artifact in Atlas.

### Required:

- `token` (string) - Your access token for the Atlas API.

-> Log in to Atlas to [generate an Atlas
Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to
configure your token is to set it to the `ATLAS_TOKEN` environment variable,
but you can also use the `token` configuration option.

- `artifact` (string) - The shorthand tag for your artifact that maps to
  Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`.
  You must have access to the organization—hashicorp in this example—in order
@ -69,6 +62,13 @@ you can also use `token` configuration option.

### Optional:

- `token` (string) - Your access token for the Atlas API.

-> Log in to Atlas to [generate an Atlas
Token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to
configure your token is to set it to the `ATLAS_TOKEN` environment variable,
but you can also use the `token` configuration option (see the example below).

- `atlas_url` (string) - Override the base URL for Atlas. This is useful if
  you're using Atlas Enterprise in your own network. Defaults to
  `https://atlas.hashicorp.com/api/v1`.
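
A hedged `atlas` post-processor example; the artifact tag and artifact type
are placeholders, and in practice the token would come from the `ATLAS_TOKEN`
environment variable rather than being written into the template:

```json
{
  "post-processors": [
    {
      "type": "atlas",
      "artifact": "hashicorp/foobar",
      "artifact_type": "amazon.image"
    }
  ]
}
```
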
@ -54,7 +54,11 @@ know.

First, the destination directory must already exist. If you need to create it,
use a shell provisioner just prior to the file provisioner in order to create
the directory.
the directory. If the destination directory does not exist, the file
provisioner may succeed, but it will have undefined results. Note that the
`docker` builder does not have this requirement. It will create any needed
destination directories, but it's generally best practice to not rely on this
behavior.
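
One illustrative way to pair the two provisioners; the paths here are
assumptions:

```json
"provisioners": [
  {
    "type": "shell",
    "inline": ["mkdir -p /opt/app"]
  },
  {
    "type": "file",
    "source": "app/",
    "destination": "/opt/app"
  }
]
```
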
Next, the existence of a trailing slash on the source path will determine
whether the directory name will be embedded within the destination, or whether
@ -91,20 +95,21 @@ lrwxr-xr-x 1 mwhooker staff 5 Jan 27 17:10 file1link -> file1

```json
"provisioners": [
  {
    "type": "shell-local",
    "command": "mkdir -p toupload; tar cf toupload/files.tar files"
  },
  {
    "destination": "/tmp/",
    "source": "./toupload",
    "type": "file"
  },
  {
    "inline": [
      "cd /tmp && tar xf toupload/files.tar",
    ],
    "type": "shell"
  }
  {
    "type": "shell-local",
    "command": "mkdir -p toupload; tar cf toupload/files.tar files"
  },
  {
    "destination": "/tmp/",
    "source": "./toupload",
    "type": "file"
  },
  {
    "inline": [
      "cd /tmp && tar xf toupload/files.tar",
      "rm toupload/files.tar"
    ],
    "type": "shell"
  }
]
```
@ -68,7 +68,7 @@ array.

  "image": "ubuntu-14-04-x64",
  "region": "nyc3",
  "size": "512mb",
  "ssh_username": "ubuntu"
  "ssh_username": "root"
}
```
@ -106,7 +106,7 @@ The entire template should now look like this:

    "image": "ubuntu-14-04-x64",
    "region": "nyc3",
    "size": "512mb",
    "ssh_username": "ubuntu"
    "ssh_username": "root"
  }],
  "provisioners": [{
    "type": "shell",
@ -34,12 +34,14 @@

<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-NR2SD7C"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->

<%= mega_nav :packer %>

<div id="header" class="navigation white <%= current_page.data.page_title == "home" ? "" : "navbar-static-top" %>">
  <div class="container-fluid">
  <div class="container">
    <div class="navbar-header">
      <div class="navbar-brand">
        <a class="logo" href="/">Packer</a>
        <a class="by-hashicorp white" href="https://www.hashicorp.com/"><span class="svg-wrap">by</span><%= partial "layouts/svg/svg-by-hashicorp" %><%= partial "layouts/svg/svg-hashicorp-logo" %>Hashicorp</a>
      </div>
      <button class="navbar-toggle white" type="button">
        <span class="sr-only">Toggle navigation</span>
@ -72,7 +74,7 @@

<%= yield %>
<div class="clearfix"></div>
<footer id="footer" class="navigation white">
  <div class="container-fluid">
  <div class="container">
    <div class="row">
      <div class="col-xs-12">
        <% # current_page.path does not have an extension, but