Merge branch 'master' into new_header

captainill 2015-12-20 00:31:38 -08:00
commit 4035aa4eed
27 changed files with 105 additions and 45 deletions

View File

@@ -18,4 +18,3 @@ matrix:
   fast_finish: true
   allow_failures:
     - go: tip
-    - go: 1.5

View File

@@ -22,6 +22,7 @@ IMPROVEMENTS:
 * builder/amazon: Add support for `ebs_optimized` [GH-2806]
 * builder/amazon: You can now specify `0` for `spot_price` to switch to on demand instances [GH-2845]
 * builder/google: `account_file` can now be provided as a JSON string [GH-2811]
+* builder/google: added support for `preemptible` instances [GH-2982]
 * builder/parallels: Improve support for Parallels 11 [GH-2662]
 * builder/parallels: Parallels disks are now compacted by default [GH-2731]
 * builder/parallels: Packer will look for Parallels in `/Applications/Parallels Desktop.app` if it is not detected automatically [GH-2839]
@@ -30,6 +31,7 @@ IMPROVEMENTS:
 * builder/qemu: qcow2 images can now be compressed [GH-2748]
 * builder/qemu: Now specifies `virtio-scsi` by default [GH-2422]
 * builder/qemu: Now checks for version-specific options [GH-2376]
+* builder/docker-import: Can now import Artifice artifacts [GH-2718]
 * provisioner/puppet: Now accepts the `extra_arguments` parameter [GH-2635]
 * post-processor/atlas: Added support for compile ID. [GH-2775]

View File

@@ -42,15 +42,19 @@ for your operating system or [compile Packer yourself](#developing-packer).
 After Packer is installed, create your first template, which tells Packer
 what platforms to build images for and how you want to build them. In our
 case, we'll create a simple AMI that has Redis pre-installed. Save this
-file as `quick-start.json`. Be sure to replace any credentials with your
-own.
+file as `quick-start.json`. Export your AWS credentials as the
+`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
 
 ```json
 {
+  "variables": {
+    "access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
+    "secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
+  },
   "builders": [{
     "type": "amazon-ebs",
-    "access_key": "YOUR KEY HERE",
-    "secret_key": "YOUR SECRET KEY HERE",
+    "access_key": "{{user `access_key`}}",
+    "secret_key": "{{user `secret_key`}}",
     "region": "us-east-1",
     "source_ami": "ami-de0d9eb7",
     "instance_type": "t1.micro",

View File

@@ -36,7 +36,7 @@ func (a *Artifact) Id() string {
 }
 
 func (a *Artifact) String() string {
-	return fmt.Sprintf("A snapshot was created: '%v' in region '%v'", a.snapshotName, a.regionName)
+	return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in region '%v'", a.snapshotName, a.snapshotId, a.regionName)
 }
 
 func (a *Artifact) State(name string) interface{} {

View File

@@ -25,7 +25,7 @@ func TestArtifactId(t *testing.T) {
 
 func TestArtifactString(t *testing.T) {
 	a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
-	expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
+	expected := "A snapshot was created: 'packer-foobar' (ID: 42) in region 'San Francisco'"
 
 	if a.String() != expected {
 		t.Fatalf("artifact string should match: %v", expected)

View File

@@ -31,6 +31,7 @@ type Config struct {
 	MachineType          string            `mapstructure:"machine_type"`
 	Metadata             map[string]string `mapstructure:"metadata"`
 	Network              string            `mapstructure:"network"`
+	Preemptible          bool              `mapstructure:"preemptible"`
 	SourceImage          string            `mapstructure:"source_image"`
 	SourceImageProjectId string            `mapstructure:"source_image_project_id"`
 	RawStateTimeout      string            `mapstructure:"state_timeout"`

View File

@@ -93,6 +93,21 @@ func TestConfigPrepare(t *testing.T) {
 			"SO VERY BAD",
 			true,
 		},
+		{
+			"preemptible",
+			nil,
+			false,
+		},
+		{
+			"preemptible",
+			false,
+			false,
+		},
+		{
+			"preemptible",
+			"SO VERY BAD",
+			true,
+		},
 	}
 
 	for _, tc := range cases {

View File

@@ -47,6 +47,7 @@ type InstanceConfig struct {
 	Metadata    map[string]string
 	Name        string
 	Network     string
+	Preemptible bool
 	Tags        []string
 	Zone        string
 }

View File

@@ -255,6 +255,9 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
 				Network: network.SelfLink,
 			},
 		},
+		Scheduling: &compute.Scheduling{
+			Preemptible: c.Preemptible,
+		},
 		ServiceAccounts: []*compute.ServiceAccount{
 			&compute.ServiceAccount{
 				Email: "default",

View File

@@ -59,6 +59,7 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction
 		Metadata:    config.getInstanceMetadata(sshPublicKey),
 		Name:        name,
 		Network:     config.Network,
+		Preemptible: config.Preemptible,
 		Tags:        config.Tags,
 		Zone:        config.Zone,
 	})

View File

@@ -88,6 +88,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 			AvailabilityZone: b.config.AvailabilityZone,
 			UserData:         b.config.UserData,
 			UserDataFile:     b.config.UserDataFile,
+			ConfigDrive:      b.config.ConfigDrive,
 		},
 		&StepWaitForRackConnect{
 			Wait: b.config.RackconnectWait,

View File

@@ -25,6 +25,8 @@ type RunConfig struct {
 	UserData     string `mapstructure:"user_data"`
 	UserDataFile string `mapstructure:"user_data_file"`
 
+	ConfigDrive bool `mapstructure:"config_drive"`
+
 	// Not really used, but here for BC
 	OpenstackProvider string `mapstructure:"openstack_provider"`
 	UseFloatingIp     bool   `mapstructure:"use_floating_ip"`

View File

@@ -45,7 +45,7 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction {
 	state.Put("image", imageId)
 
 	// Wait for the image to become ready
-	ui.Say("Waiting for image to become ready...")
+	ui.Say(fmt.Sprintf("Waiting for image %s (image id: %s) to become ready...", config.ImageName, imageId))
 	if err := WaitForImage(client, imageId); err != nil {
 		err := fmt.Errorf("Error waiting for image: %s", err)
 		state.Put("error", err)

View File

@@ -47,8 +47,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
 		return multistep.ActionHalt
 	}
 
-	ui.Say("Creating temporary keypair for this instance...")
 	keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID())
+	ui.Say(fmt.Sprintf("Creating temporary keypair: %s ...", keyName))
 	keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{
 		Name: keyName,
 	}).Extract()
@@ -62,6 +62,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
 		return multistep.ActionHalt
 	}
 
+	ui.Say(fmt.Sprintf("Created temporary keypair: %s", keyName))
+
 	// If we're in debug mode, output the private key to the working
 	// directory.
 	if s.Debug {
@@ -120,7 +122,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
 		return
 	}
 
-	ui.Say("Deleting temporary keypair...")
+	ui.Say(fmt.Sprintf("Deleting temporary keypair: %s ...", s.keyName))
 	err = keypairs.Delete(computeClient, s.keyName).ExtractErr()
 	if err != nil {
 		ui.Error(fmt.Sprintf(

View File

@@ -19,7 +19,8 @@ type StepRunSourceServer struct {
 	AvailabilityZone string
 	UserData         string
 	UserDataFile     string
+	ConfigDrive      bool
 
 	server *servers.Server
 }
@@ -62,6 +62,7 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction
 			Networks:         networks,
 			AvailabilityZone: s.AvailabilityZone,
 			UserData:         userData,
+			ConfigDrive:      s.ConfigDrive,
 		},
 		KeyName: keyName,
@@ -112,7 +113,7 @@ func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) {
 		return
 	}
 
-	ui.Say("Terminating the source server...")
+	ui.Say(fmt.Sprintf("Terminating the source server: %s ...", s.server.ID))
 	if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil {
 		ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err))
 		return

View File

@@ -31,14 +31,14 @@ func (s *StepStopServer) Run(state multistep.StateBag) multistep.StepAction {
 		return multistep.ActionHalt
 	}
 
-	ui.Say("Stopping server...")
+	ui.Say(fmt.Sprintf("Stopping server: %s ...", server.ID))
 	if err := startstop.Stop(client, server.ID).ExtractErr(); err != nil {
 		err = fmt.Errorf("Error stopping server: %s", err)
 		state.Put("error", err)
 		return multistep.ActionHalt
 	}
 
-	ui.Message("Waiting for server to stop...")
+	ui.Message(fmt.Sprintf("Waiting for server to stop: %s ...", server.ID))
 	stateChange := StateChangeConf{
 		Pending: []string{"ACTIVE"},
 		Target:  []string{"SHUTOFF", "STOPPED"},

View File

@@ -58,6 +58,9 @@ type Config struct {
 	// This can be set high to allow for reboots.
 	RawStartRetryTimeout string `mapstructure:"start_retry_timeout"`
 
+	// Whether to clean scripts up
+	SkipClean bool `mapstructure:"skip_clean"`
+
 	startRetryTimeout time.Duration
 	ctx               interpolate.Context
 }
@@ -271,6 +274,8 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
 		return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus)
 	}
 
+	if !p.config.SkipClean {
+
 	// Delete the temporary file we created. We retry this a few times
 	// since if the above rebooted we have to wait until the reboot
 	// completes.
@@ -296,6 +301,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
 				p.config.RemotePath)
 		}
 	}
+	}
 
 	return nil
 }

View File

@@ -69,7 +69,7 @@ builder.
     device mappings to the AMI. The block device mappings allow for keys:
 
 -   `device_name` (string) - The device name exposed to the instance (for
-    example, "/dev/sdh" or "xvdh")
+    example, "/dev/sdh" or "xvdh"). Required when specifying `volume_size`.
 
 -   `virtual_name` (string) - The virtual device name. See the documentation on
     [Block Device
     Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
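For context on the requirement added above, a hedged sketch of a mapping that pairs `device_name` with `volume_size` (values are illustrative, and the other required builder fields are omitted for brevity):

```json
{
  "builders": [{
    "type": "amazon-ebs",
    "ami_block_device_mappings": [{
      "device_name": "/dev/sdb",
      "volume_size": 10,
      "volume_type": "gp2",
      "delete_on_termination": true
    }]
  }]
}
```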

View File

@@ -90,7 +90,7 @@ builder.
    device mappings to the AMI. The block device mappings allow for keys:
 
 -   `device_name` (string) - The device name exposed to the instance (for
-    example, "/dev/sdh" or "xvdh")
+    example, "/dev/sdh" or "xvdh"). Required when specifying `volume_size`.
 
 -   `virtual_name` (string) - The virtual device name. See the documentation on
     [Block Device
     Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)

View File

@@ -144,6 +144,8 @@ builder.
 -   `use_internal_ip` (boolean) - If true, use the instance's internal IP
     instead of its external IP during building.
 
+-   `preemptible` (boolean) - If true, launch a preemptible instance.
+
 ## Gotchas
 
 Centos images have root ssh access disabled by default. Set `ssh_username` to
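For context on the new `preemptible` flag documented above, a minimal hedged sketch of a `googlecompute` builder stanza (the account file, project, image, and zone are placeholders):

```json
{
  "builders": [{
    "type": "googlecompute",
    "account_file": "account.json",
    "project_id": "my-project",
    "source_image": "debian-7-wheezy-v20150127",
    "zone": "us-central1-a",
    "preemptible": true
  }]
}
```

Note that GCE may reclaim a preemptible instance at any time, so long-running builds can be interrupted; the cost savings trade against that risk.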

View File

@@ -103,6 +103,9 @@ builder.
 -   `metadata` (object of key/value strings) - Glance metadata that will be
     applied to the image.
 
+-   `config_drive` (boolean) - Whether or not nova should use ConfigDrive for
+    cloud-init metadata.
+
 ## Basic Example: Rackspace public cloud
 
 Here is a basic example. This is a working example to build a Ubuntu 12.04 LTS
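For context on the new `config_drive` option above, a hedged sketch of an `openstack` builder stanza that enables it (the image and flavor values are placeholders):

```json
{
  "builders": [{
    "type": "openstack",
    "image_name": "ubuntu-with-config-drive",
    "source_image": "<source image id>",
    "flavor": "2",
    "ssh_username": "ubuntu",
    "config_drive": true
  }]
}
```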

View File

@@ -12,7 +12,7 @@ page_title: 'Atlas Post-Processor'
 Type: `atlas`
 
 The Atlas post-processor uploads artifacts from your packer builds to Atlas for
-hosting. Artifacts hosted in Atlas are are automatically made available for use
+hosting. Artifacts hosted in Atlas are automatically made available for use
 with Vagrant and Terraform, and Atlas provides additional features for managing
 versions and releases. [Learn more about packer in
 Atlas.](https://atlas.hashicorp.com/help/packer/features)

View File

@@ -29,6 +29,11 @@ This post-processor has only optional configuration:
 -   `login_server` (string) - The server address to login to.
 
+Note: When using _Docker Hub_ or _Quay_ registry servers, `login` must be
+set to `true` and `login_email`, `login_username`, **and** `login_password`
+must be set to your registry credentials. When using Docker Hub,
+`login_server` can be omitted.
+
 -> **Note:** If you login using the credentials above, the post-processor
 will automatically log you out afterwards (just the server specified).
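A hedged sketch of a Docker Hub push following the new note (the repository and credentials are placeholders):

```json
{
  "post-processors": [{
    "type": "docker-push",
    "repository": "myuser/myrepo",
    "login": true,
    "login_email": "me@example.com",
    "login_username": "myuser",
    "login_password": "mypassword"
  }]
}
```

`login_server` is omitted here because, per the note, it is unnecessary for Docker Hub.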

View File

@@ -18,6 +18,12 @@ uploaded from your local machine to the remote machine. Ansible is run in [local
 mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the
 `ansible-playbook` command.
 
+-> **Note:** Ansible will *not* be installed automatically by this
+provisioner. This provisioner expects that Ansible is already installed on the
+machine. It is common practice to use the [shell
+provisioner](/docs/provisioners/shell.html) before the Ansible provisioner to do
+this.
+
 ## Basic Example
 
 The example below is fully functional.
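A hedged sketch of the pattern the new note recommends, installing Ansible with the shell provisioner before running `ansible-local` (the apt-based install assumes a Debian/Ubuntu guest):

```json
{
  "provisioners": [
    {
      "type": "shell",
      "inline": [
        "sudo apt-get update",
        "sudo apt-get install -y ansible"
      ]
    },
    {
      "type": "ansible-local",
      "playbook_file": "./playbook.yml"
    }
  ]
}
```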
@@ -67,6 +73,7 @@ specified host you're building. The `--limit` argument can be provided in the
 `extra_arguments` option.
 
 An example inventory file may look like:
 
+```{.text}
 [chi-dbservers]
 db-01 ansible_connection=local

View File

@@ -44,7 +44,7 @@ The available configuration options are listed below. All elements are required.
 -   `direction` (string) - The direction of the file transfer. This defaults to
     "upload." If it is set to "download" then the file "source" in the machine
-    wll be downloaded locally to "destination"
+    will be downloaded locally to "destination"
 
 ## Directory Uploads
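For context on the `direction` option corrected above, a hedged sketch of a download transfer (paths are illustrative):

```json
{
  "provisioners": [{
    "type": "file",
    "source": "/var/log/cloud-init.log",
    "destination": "logs/cloud-init.log",
    "direction": "download"
  }]
}
```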

View File

@@ -78,9 +78,10 @@ Optional parameters:
     **Important:** If you customize this, be sure to include something like the
     `-e` flag, otherwise individual steps failing won't fail the provisioner.
 
--   `remote_path` (string) - The path where the script will be uploaded to in
-    the machine. This defaults to "/tmp/script.sh". This value must be a
-    writable location and any parent directories must already exist.
+-   `remote_path` (string) - The filename where the script will be uploaded
+    to in the machine. This defaults to "/tmp/script_nnn.sh" where "nnn" is
+    a randomly generated number. This value must be a writable location and
+    any parent directories must already exist.
 
 -   `start_retry_timeout` (string) - The amount of time to attempt to *start*
     the remote process. By default this is "5m" or 5 minutes. This setting
@@ -88,6 +89,10 @@ Optional parameters:
     system reboot. Set this to a higher value if reboots take a longer amount
     of time.
 
+-   `skip_clean` (boolean) - If true, specifies that the helper scripts
+    uploaded to the system will not be removed by Packer. This defaults to
+    false (clean scripts from the system).
+
 ## Execute Command Example
 
 To many new users, the `execute_command` is puzzling. However, it provides an
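A hedged sketch combining the two options documented above: a fixed `remote_path` plus `skip_clean` so the uploaded script is left on the machine for post-mortem debugging (the script name is illustrative):

```json
{
  "provisioners": [{
    "type": "shell",
    "script": "scripts/setup.sh",
    "remote_path": "/tmp/setup.sh",
    "skip_clean": true
  }]
}
```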

View File

@@ -82,7 +82,7 @@ sequence definition. Sequence definitions are used to chain together multiple
 post-processors. An example is shown below, where the artifact of a build is
 compressed then uploaded, but the compressed result is not kept.
 
-It is very important that any post processors that need to be ran in order, be sequenced!
+It is very important that any post-processors that need to run in order be sequenced!
 
 ``` {.javascript}
 {