diff --git a/.travis.yml b/.travis.yml
index 497bbe1ba..5880a73a4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,4 +18,3 @@ matrix: fast_finish: true allow_failures: - go: tip - - go: 1.5
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6399411ce..990efe9f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ IMPROVEMENTS: * builder/amazon: Add support for `ebs_optimized` [GH-2806] * builder/amazon: You can now specify `0` for `spot_price` to switch to on demand instances [GH-2845] * builder/google: `account_file` can now be provided as a JSON string [GH-2811] + * builder/google: added support for `preemptible` instances [GH-2982] * builder/parallels: Improve support for Parallels 11 [GH-2662] * builder/parallels: Parallels disks are now compacted by default [GH-2731] * builder/parallels: Packer will look for Parallels in `/Applications/Parallels Desktop.app` if it is not detected automatically [GH-2839]
@@ -30,6 +31,7 @@ IMPROVEMENTS: * builder/qemu: qcow2 images can now be compressed [GH-2748] * builder/qemu: Now specifies `virtio-scsi` by default [GH-2422] * builder/qemu: Now checks for version-specific options [GH-2376] + * builder/docker-import: Can now import Artifice artifacts [GH-2718] * provisioner/puppet: Now accepts the `extra_arguments` parameter [GH-2635] * post-processor/atlas: Added support for compile ID. [GH-2775]
diff --git a/README.md b/README.md
index fd562eb9a..fa23df693 100644
--- a/README.md
+++ b/README.md
@@ -42,15 +42,19 @@ for your operating system or [compile Packer yourself](#developing-packer). After Packer is installed, create your first template, which tells Packer what platforms to build images for and how you want to build them. In our case, we'll create a simple AMI that has Redis pre-installed. Save this -file as `quick-start.json`. Be sure to replace any credentials with your -own. +file as `quick-start.json`. Export your AWS credentials as the +`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
```json { + "variables": { + "access_key": "{{env `AWS_ACCESS_KEY_ID`}}", + "secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}" + }, "builders": [{ "type": "amazon-ebs", - "access_key": "YOUR KEY HERE", - "secret_key": "YOUR SECRET KEY HERE", + "access_key": "{{user `access_key`}}", + "secret_key": "{{user `secret_key`}}", "region": "us-east-1", "source_ami": "ami-de0d9eb7", "instance_type": "t1.micro",
diff --git a/builder/digitalocean/artifact.go b/builder/digitalocean/artifact.go
index 6abb561c9..8f6e81161 100644
--- a/builder/digitalocean/artifact.go
+++ b/builder/digitalocean/artifact.go
@@ -36,7 +36,7 @@ func (a *Artifact) Id() string { } func (a *Artifact) String() string { - return fmt.Sprintf("A snapshot was created: '%v' in region '%v'", a.snapshotName, a.regionName) + return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in region '%v'", a.snapshotName, a.snapshotId, a.regionName) } func (a *Artifact) State(name string) interface{} {
diff --git a/builder/digitalocean/artifact_test.go b/builder/digitalocean/artifact_test.go
index 7ea586111..d8523154d 100644
--- a/builder/digitalocean/artifact_test.go
+++ b/builder/digitalocean/artifact_test.go
@@ -25,7 +25,7 @@ func TestArtifactId(t *testing.T) { func TestArtifactString(t *testing.T) { a := &Artifact{"packer-foobar", 42, "San Francisco", nil} - expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'" + expected := "A snapshot was created: 'packer-foobar' (ID: 42) in region 'San Francisco'" if a.String() != expected { t.Fatalf("artifact string should match: %v", expected)
diff --git a/builder/googlecompute/config.go b/builder/googlecompute/config.go
index 4ca45c69f..e79a92e64 100644
--- a/builder/googlecompute/config.go
+++ b/builder/googlecompute/config.go
@@ -31,6 +31,7 @@ type Config struct { MachineType string `mapstructure:"machine_type"` Metadata map[string]string `mapstructure:"metadata"` Network string `mapstructure:"network"` + Preemptible bool `mapstructure:"preemptible"` SourceImage string `mapstructure:"source_image"` SourceImageProjectId string `mapstructure:"source_image_project_id"` RawStateTimeout string `mapstructure:"state_timeout"`
diff --git a/builder/googlecompute/config_test.go b/builder/googlecompute/config_test.go
index 581c1425b..372c85004 100644
--- a/builder/googlecompute/config_test.go
+++ b/builder/googlecompute/config_test.go
@@ -93,6 +93,21 @@ func TestConfigPrepare(t *testing.T) { "SO VERY BAD", true, }, + { + "preemptible", + nil, + false, + }, + { + "preemptible", + false, + false, + }, + { + "preemptible", + "SO VERY BAD", + true, + }, } for _, tc := range cases {
diff --git a/builder/googlecompute/driver.go b/builder/googlecompute/driver.go
index be697fe6b..6a9c3d651 100644
--- a/builder/googlecompute/driver.go
+++ b/builder/googlecompute/driver.go
@@ -47,6 +47,7 @@ type InstanceConfig struct { Metadata map[string]string Name string Network string + Preemptible bool Tags []string Zone string }
diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go
index d7b745d58..b1e4d8d45 100644
--- a/builder/googlecompute/driver_gce.go
+++ b/builder/googlecompute/driver_gce.go
@@ -255,6 +255,9 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) { Network: network.SelfLink, }, }, + Scheduling: &compute.Scheduling{ + Preemptible: c.Preemptible, + }, ServiceAccounts: []*compute.ServiceAccount{ &compute.ServiceAccount{ Email: "default",
diff --git a/builder/googlecompute/step_create_instance.go b/builder/googlecompute/step_create_instance.go
index 939925c58..891072438 100644
--- a/builder/googlecompute/step_create_instance.go
+++ b/builder/googlecompute/step_create_instance.go
@@ -59,6 +59,7 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction Metadata: config.getInstanceMetadata(sshPublicKey), Name: name, Network: config.Network, + Preemptible: config.Preemptible, Tags: config.Tags, Zone: config.Zone, })
diff --git a/builder/openstack/builder.go b/builder/openstack/builder.go
index 9f4c9e7bc..72610e73e 100644
--- a/builder/openstack/builder.go
+++ b/builder/openstack/builder.go
@@ -88,6 +88,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe AvailabilityZone: b.config.AvailabilityZone, UserData: b.config.UserData, UserDataFile: b.config.UserDataFile, + ConfigDrive: b.config.ConfigDrive, }, &StepWaitForRackConnect{ Wait: b.config.RackconnectWait,
diff --git a/builder/openstack/run_config.go b/builder/openstack/run_config.go
index a8b8638dc..536245323 100644
--- a/builder/openstack/run_config.go
+++ b/builder/openstack/run_config.go
@@ -25,6 +25,8 @@ type RunConfig struct { UserData string `mapstructure:"user_data"` UserDataFile string `mapstructure:"user_data_file"` + ConfigDrive bool `mapstructure:"config_drive"` + // Not really used, but here for BC OpenstackProvider string `mapstructure:"openstack_provider"` UseFloatingIp bool `mapstructure:"use_floating_ip"`
diff --git a/builder/openstack/step_create_image.go b/builder/openstack/step_create_image.go
index 9b6ac0cd8..61607b2f2 100644
--- a/builder/openstack/step_create_image.go
+++ b/builder/openstack/step_create_image.go
@@ -45,7 +45,7 @@ func (s *stepCreateImage) Run(state multistep.StateBag) multistep.StepAction { state.Put("image", imageId) // Wait for the image to become ready - ui.Say("Waiting for image to become ready...") + ui.Say(fmt.Sprintf("Waiting for image %s (image id: %s) to become ready...", config.ImageName, imageId)) if err := WaitForImage(client, imageId); err != nil { err := fmt.Errorf("Error waiting for image: %s", err) state.Put("error", err)
diff --git a/builder/openstack/step_key_pair.go b/builder/openstack/step_key_pair.go
index f17d76f35..32d7430ef 100644
--- a/builder/openstack/step_key_pair.go
+++ b/builder/openstack/step_key_pair.go
@@ -47,8 +47,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - ui.Say("Creating temporary keypair for this instance...") keyName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID()) + ui.Say(fmt.Sprintf("Creating temporary keypair: %s ...", keyName)) keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{ Name: keyName, }).Extract()
@@ -62,6 +62,8 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } + ui.Say(fmt.Sprintf("Created temporary keypair: %s", keyName)) + // If we're in debug mode, output the private key to the working // directory. if s.Debug {
@@ -120,7 +122,7 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) { return } - ui.Say("Deleting temporary keypair...") + ui.Say(fmt.Sprintf("Deleting temporary keypair: %s ...", s.keyName)) err = keypairs.Delete(computeClient, s.keyName).ExtractErr() if err != nil { ui.Error(fmt.Sprintf(
diff --git a/builder/openstack/step_run_source_server.go b/builder/openstack/step_run_source_server.go
index f78fe4f90..20145929d 100644
--- a/builder/openstack/step_run_source_server.go
+++ b/builder/openstack/step_run_source_server.go
@@ -19,8 +19,8 @@ type StepRunSourceServer struct { AvailabilityZone string UserData string UserDataFile string - - server *servers.Server + ConfigDrive bool + server *servers.Server } func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction {
@@ -62,6 +62,7 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction Networks: networks, AvailabilityZone: s.AvailabilityZone, UserData: userData, + ConfigDrive: s.ConfigDrive, }, KeyName: keyName,
@@ -112,7 +113,7 @@ func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) { return } - ui.Say("Terminating the source server...") + ui.Say(fmt.Sprintf("Terminating the source server: %s ...", s.server.ID)) if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil { ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err)) return
diff --git a/builder/openstack/step_stop_server.go b/builder/openstack/step_stop_server.go
index 298d0bc0a..d04a10f60 100644
--- a/builder/openstack/step_stop_server.go
+++ b/builder/openstack/step_stop_server.go
@@ -31,14 +31,14 @@ func (s *StepStopServer) Run(state multistep.StateBag) multistep.StepAction { return multistep.ActionHalt } - ui.Say("Stopping server...") + ui.Say(fmt.Sprintf("Stopping server: %s ...", server.ID)) if err := startstop.Stop(client, server.ID).ExtractErr(); err != nil { err = fmt.Errorf("Error stopping server: %s", err) state.Put("error", err) return multistep.ActionHalt } - ui.Message("Waiting for server to stop...") + ui.Message(fmt.Sprintf("Waiting for server to stop: %s ...", server.ID)) stateChange := StateChangeConf{ Pending: []string{"ACTIVE"}, Target: []string{"SHUTOFF", "STOPPED"},
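A minimal usage sketch for the new `config_drive` option wired through the OpenStack builder above; every other value here is an illustrative placeholder, not part of this change, and the usual authentication settings (or `OS_*` environment variables) are omitted:

```json
{
  "builders": [{
    "type": "openstack",
    "image_name": "packer-example",
    "source_image": "REPLACE-WITH-A-SOURCE-IMAGE-ID",
    "flavor": "m1.small",
    "ssh_username": "ubuntu",
    "config_drive": true
  }]
}
```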
diff --git a/provisioner/shell/provisioner.go b/provisioner/shell/provisioner.go
index 3c32b3fc2..78826b638 100644
--- a/provisioner/shell/provisioner.go
+++ b/provisioner/shell/provisioner.go
@@ -58,6 +58,9 @@ type Config struct { // This can be set high to allow for reboots. RawStartRetryTimeout string `mapstructure:"start_retry_timeout"` + // Whether to clean scripts up + SkipClean bool `mapstructure:"skip_clean"` + startRetryTimeout time.Duration ctx interpolate.Context }
@@ -271,29 +274,32 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } - // Delete the temporary file we created. We retry this a few times - // since if the above rebooted we have to wait until the reboot - // completes. - err = p.retryable(func() error { - cmd = &packer.RemoteCmd{ - Command: fmt.Sprintf("rm -f %s", p.config.RemotePath), - } - if err := comm.Start(cmd); err != nil { - return fmt.Errorf( - "Error removing temporary script at %s: %s", - p.config.RemotePath, err) - } - cmd.Wait() - return nil - }) - if err != nil { - return err - } + if !p.config.SkipClean { - if cmd.ExitStatus != 0 { - return fmt.Errorf( - "Error removing temporary script at %s!", - p.config.RemotePath) + // Delete the temporary file we created. We retry this a few times + // since if the above rebooted we have to wait until the reboot + // completes. + err = p.retryable(func() error { + cmd = &packer.RemoteCmd{ + Command: fmt.Sprintf("rm -f %s", p.config.RemotePath), + } + if err := comm.Start(cmd); err != nil { + return fmt.Errorf( + "Error removing temporary script at %s: %s", + p.config.RemotePath, err) + } + cmd.Wait() + return nil + }) + if err != nil { + return err + } + + if cmd.ExitStatus != 0 { + return fmt.Errorf( + "Error removing temporary script at %s!", + p.config.RemotePath) + } } }
diff --git a/website/source/docs/builders/amazon-ebs.html.markdown b/website/source/docs/builders/amazon-ebs.html.markdown
index 860040ee7..6ee9adf88 100644
--- a/website/source/docs/builders/amazon-ebs.html.markdown
+++ b/website/source/docs/builders/amazon-ebs.html.markdown
@@ -69,7 +69,7 @@ builder. device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh"). Required when specifying `volume_size`. - `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
diff --git a/website/source/docs/builders/amazon-instance.html.markdown b/website/source/docs/builders/amazon-instance.html.markdown
index 989052b1a..a1530996a 100644
--- a/website/source/docs/builders/amazon-instance.html.markdown
+++ b/website/source/docs/builders/amazon-instance.html.markdown
@@ -90,7 +90,7 @@ builder. device mappings to the AMI. The block device mappings allow for keys: - `device_name` (string) - The device name exposed to the instance (for - example, "/dev/sdh" or "xvdh") + example, "/dev/sdh" or "xvdh"). Required when specifying `volume_size`. - `virtual_name` (string) - The virtual device name. See the documentation on [Block Device Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
diff --git a/website/source/docs/builders/googlecompute.html.markdown b/website/source/docs/builders/googlecompute.html.markdown
index c97cd672a..fe45ee01b 100644
--- a/website/source/docs/builders/googlecompute.html.markdown
+++ b/website/source/docs/builders/googlecompute.html.markdown
@@ -144,6 +144,8 @@ builder. - `use_internal_ip` (boolean) - If true, use the instance's internal IP instead of its external IP during building. +- `preemptible` (boolean) - If true, launch a preemptible instance. + ## Gotchas Centos images have root ssh access disabled by default. Set `ssh_username` to
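A minimal usage sketch for the new `preemptible` flag documented above; the project, zone, image, and credential values are illustrative placeholders, not part of this change:

```json
{
  "builders": [{
    "type": "googlecompute",
    "account_file": "account.json",
    "project_id": "YOUR-PROJECT-ID",
    "source_image": "debian-7-wheezy-v20150127",
    "zone": "us-central1-a",
    "preemptible": true
  }]
}
```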
diff --git a/website/source/docs/builders/openstack.html.markdown b/website/source/docs/builders/openstack.html.markdown
index 12a1ca882..cb54b52d2 100644
--- a/website/source/docs/builders/openstack.html.markdown
+++ b/website/source/docs/builders/openstack.html.markdown
@@ -103,6 +103,9 @@ builder. - `metadata` (object of key/value strings) - Glance metadata that will be applied to the image. +- `config_drive` (boolean) - Whether or not nova should use ConfigDrive for + cloud-init metadata. + ## Basic Example: Rackspace public cloud Here is a basic example. This is a working example to build a Ubuntu 12.04 LTS
diff --git a/website/source/docs/post-processors/atlas.html.markdown b/website/source/docs/post-processors/atlas.html.markdown
index 705623131..7940b42bc 100644
--- a/website/source/docs/post-processors/atlas.html.markdown
+++ b/website/source/docs/post-processors/atlas.html.markdown
@@ -12,7 +12,7 @@ page_title: 'Atlas Post-Processor' Type: `atlas` The Atlas post-processor uploads artifacts from your packer builds to Atlas for -hosting. Artifacts hosted in Atlas are are automatically made available for use +hosting. Artifacts hosted in Atlas are automatically made available for use with Vagrant and Terraform, and Atlas provides additional features for managing versions and releases. [Learn more about packer in Atlas.](https://atlas.hashicorp.com/help/packer/features)
diff --git a/website/source/docs/post-processors/docker-push.html.markdown b/website/source/docs/post-processors/docker-push.html.markdown
index 9657e27b7..3a5247320 100644
--- a/website/source/docs/post-processors/docker-push.html.markdown
+++ b/website/source/docs/post-processors/docker-push.html.markdown
@@ -29,6 +29,11 @@ This post-processor has only optional configuration: - `login_server` (string) - The server address to login to. +Note: When using _Docker Hub_ or _Quay_ registry servers, `login` must be +set to `true` and `login_email`, `login_username`, **and** `login_password` +must be set to your registry credentials. When using Docker Hub, +`login_server` can be omitted. + -> **Note:** If you login using the credentials above, the post-processor will automatically log you out afterwards (just the server specified).
diff --git a/website/source/docs/provisioners/ansible-local.html.markdown b/website/source/docs/provisioners/ansible-local.html.markdown
index c0cec6641..d7ed58846 100644
--- a/website/source/docs/provisioners/ansible-local.html.markdown
+++ b/website/source/docs/provisioners/ansible-local.html.markdown
@@ -18,6 +18,12 @@ uploaded from your local machine to the remote machine. Ansible is run in [local mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the `ansible-playbook` command. +-> **Note:** Ansible will *not* be installed automatically by this +provisioner. This provisioner expects that Ansible is already installed on the +machine. It is common practice to use the [shell +provisioner](/docs/provisioners/shell.html) before the Ansible provisioner to do +this. + ## Basic Example The example below is fully functional.
@@ -67,6 +73,7 @@ specified host you're buiding. The `--limit` argument can be provided in the `extra_arguments` option. An example inventory file may look like: + ```{.text} [chi-dbservers] db-01 ansible_connection=local
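A sketch of the practice described in the ansible-local note above, running a shell provisioner to install Ansible before the ansible-local provisioner; the install commands assume a Debian/Ubuntu image and the playbook path is a hypothetical placeholder:

```json
{
  "provisioners": [
    {
      "type": "shell",
      "inline": [
        "sudo apt-get update",
        "sudo apt-get install -y ansible"
      ]
    },
    {
      "type": "ansible-local",
      "playbook_file": "playbook.yml"
    }
  ]
}
```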
If it is set to "download" then the file "source" in the machine - wll be downloaded locally to "destination" + will be downloaded locally to "destination" ## Directory Uploads diff --git a/website/source/docs/provisioners/shell.html.markdown b/website/source/docs/provisioners/shell.html.markdown index 9cd05ef12..710c201ca 100644 --- a/website/source/docs/provisioners/shell.html.markdown +++ b/website/source/docs/provisioners/shell.html.markdown @@ -78,9 +78,10 @@ Optional parameters: **Important:** If you customize this, be sure to include something like the `-e` flag, otherwise individual steps failing won't fail the provisioner. -- `remote_path` (string) - The path where the script will be uploaded to in - the machine. This defaults to "/tmp/script.sh". This value must be a - writable location and any parent directories must already exist. +- `remote_path` (string) - The filename where the script will be uploaded + to in the machine. This defaults to "/tmp/script_nnn.sh" where "nnn" is + a randomly generated number. This value must be a writable location and + any parent directories must already exist. - `start_retry_timeout` (string) - The amount of time to attempt to *start* the remote process. By default this is "5m" or 5 minutes. This setting @@ -88,6 +89,10 @@ Optional parameters: system reboot. Set this to a higher value if reboots take a longer amount of time. +- `skip_clean` (boolean) - If true, specifies that the helper scripts + uploaded to the system will not be removed by Packer. This defaults to + false (clean scripts from the system). + ## Execute Command Example To many new users, the `execute_command` is puzzling. However, it provides an diff --git a/website/source/docs/templates/post-processors.html.markdown b/website/source/docs/templates/post-processors.html.markdown index c6926b5f1..7065b4dae 100644 --- a/website/source/docs/templates/post-processors.html.markdown +++ b/website/source/docs/templates/post-processors.html.markdown @@ -82,7 +82,7 @@ sequence definition. Sequence definitions are used to chain together multiple post-processors. An example is shown below, where the artifact of a build is compressed then uploaded, but the compressed result is not kept. -It is very important that any post processors that need to be ran in order, be sequenced! +It is very important that any post processors that need to be run in order, be sequenced! ``` {.javascript} {