Merge pull request #1 from mitchellh/master

Updating from official repo

Commit: 101f68f003
.travis.yml (16 changes)

@@ -1,15 +1,31 @@
sudo: false

language: go

go:
  - 1.2
  - 1.3
  - 1.4
  - tip

install: make updatedeps

script:
  - GOMAXPROCS=2 make test
  #- go test -race ./...

branches:
  only:
    - master

notifications:
  irc:
    channels:
      - "irc.freenode.org#packer-tool"
    skip_join: true
    use_notice: true

matrix:
  fast_finish: true
  allow_failures:
    - go: tip
CHANGELOG.md (47 changes)

@@ -1,6 +1,53 @@
## 0.8.0 (unreleased)

FEATURES:

IMPROVEMENTS:

BUG FIXES:

* builder/docker: Fixed hang on prompt while copying script
* builder/virtualbox: Added SCSI support
* postprocessor/vagrant-cloud: Fixed failing on response
* provisioner/puppet-masterless: Allow manifest_file to be a directory
* provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call

## 0.7.5 (December 9, 2014)

FEATURES:

* **New command: `packer push`**: Push template and files to HashiCorp's
  Atlas for building your templates automatically.
* **New post-processor: `atlas`**: Send artifact to HashiCorp's Atlas for
  versioning and storing artifacts. These artifacts can then be queried
  using the API, Terraform, etc.

IMPROVEMENTS:

* builder/googlecompute: Support for ubuntu-os-cloud project
* builder/googlecompute: Support for OAuth2 to avoid client secrets file
* builder/googlecompute: GCE image from persistent disk instead of tarball
* builder/qemu: Checksum type "none" can be used
* provisioner/chef: Generate a node name if none available
* provisioner/chef: Added ssl_verify_mode configuration

BUG FIXES:

* builder/parallels: Fixed attachment of ISO to cdrom device
* builder/parallels: Fixed boot load ordering
* builder/digitalocean: Fixed decoding of size
* builder/digitalocean: Fixed missing content-type header in request
* builder/digitalocean: Fixed use of private IP
* builder/digitalocean: Fixed the artifact ID generation
* builder/vsphere: Fixed credential escaping
* builder/qemu: Fixed use of CDROM with disk_image
* builder/aws: Fixed IP address for SSH in VPC
* builder/aws: Fixed issue with multiple block devices
* builder/vmware: Upload VMX to ESX5 after editing
* communicator/docker: Fix handling of symlinks during upload
* provisioner/chef: Fixed use of sudo in some cases
* core: Fixed build name interpolation
* postprocessor/vagrant: Fixed check for Vagrantfile template

## 0.7.2 (October 28, 2014)
README.md

@@ -1,5 +1,7 @@
# Packer

+[![Build Status](https://travis-ci.org/mitchellh/packer.svg?branch=master)](https://travis-ci.org/mitchellh/packer)
+
* Website: http://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
builder/amazon/common/artifact.go

@@ -2,12 +2,13 @@ package common

import (
	"fmt"
-	"github.com/mitchellh/goamz/aws"
-	"github.com/mitchellh/goamz/ec2"
-	"github.com/mitchellh/packer/packer"
	"log"
	"sort"
	"strings"
+
+	"github.com/mitchellh/goamz/aws"
+	"github.com/mitchellh/goamz/ec2"
+	"github.com/mitchellh/packer/packer"
)

// Artifact is an artifact implementation that contains built AMIs.

@@ -53,7 +54,12 @@ func (a *Artifact) String() string {
}

func (a *Artifact) State(name string) interface{} {
+	switch name {
+	case "atlas.artifact.metadata":
+		return a.stateAtlasMetadata()
+	default:
		return nil
+	}
}

func (a *Artifact) Destroy() error {

@@ -79,3 +85,13 @@ func (a *Artifact) Destroy() error {

	return nil
}
+
+func (a *Artifact) stateAtlasMetadata() interface{} {
+	metadata := make(map[string]string)
+	for region, imageId := range a.Amis {
+		k := fmt.Sprintf("region.%s", region)
+		metadata[k] = imageId
+	}
+
+	return metadata
+}
builder/amazon/common/artifact_test.go

@@ -1,8 +1,10 @@
package common

import (
-	"github.com/mitchellh/packer/packer"
	"reflect"
	"testing"
+
+	"github.com/mitchellh/packer/packer"
)

func TestArtifact_Impl(t *testing.T) {

@@ -26,6 +28,24 @@ func TestArtifactId(t *testing.T) {
	}
}

+func TestArtifactState_atlasMetadata(t *testing.T) {
+	a := &Artifact{
+		Amis: map[string]string{
+			"east": "foo",
+			"west": "bar",
+		},
+	}
+
+	actual := a.State("atlas.artifact.metadata")
+	expected := map[string]string{
+		"region.east": "foo",
+		"region.west": "bar",
+	}
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: %#v", actual)
+	}
+}
+
func TestArtifactString(t *testing.T) {
	expected := `AMIs were created:
builder/amazon/common/block_device.go

@@ -60,12 +60,12 @@ func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error {

	var errs []error
	for outer, bds := range lists {
-		for i, bd := range bds {
+		for i := 0; i < len(bds); i++ {
			templates := map[string]*string{
-				"device_name":  &bd.DeviceName,
-				"snapshot_id":  &bd.SnapshotId,
-				"virtual_name": &bd.VirtualName,
-				"volume_type":  &bd.VolumeType,
+				"device_name":  &bds[i].DeviceName,
+				"snapshot_id":  &bds[i].SnapshotId,
+				"virtual_name": &bds[i].VirtualName,
+				"volume_type":  &bds[i].VolumeType,
			}

			errs := make([]error, 0)
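The loop rewrite above works around a Go pitfall of the era (pre-1.22 semantics): `range` reused a single loop variable, so `&bd.DeviceName` pointed at a per-iteration copy and the interpolated values were written into a throwaway struct rather than back into the slice. A minimal standalone sketch of the difference, with hypothetical device names:

package main

import "fmt"

type BlockDevice struct{ DeviceName string }

func main() {
	bds := []BlockDevice{{"xvda"}, {"xvdb"}}

	// &bd.DeviceName takes the address of the single range variable, so
	// every stored pointer aliases the same loop copy (pre-Go 1.22).
	var viaRange []*string
	for _, bd := range bds {
		viaRange = append(viaRange, &bd.DeviceName)
	}
	fmt.Println(*viaRange[0], *viaRange[1]) // xvdb xvdb: writes would hit the copy

	// Indexing yields pointers into the slice itself, so values written
	// through them (as the template processing does) actually stick.
	var viaIndex []*string
	for i := 0; i < len(bds); i++ {
		viaIndex = append(viaIndex, &bds[i].DeviceName)
	}
	fmt.Println(*viaIndex[0], *viaIndex[1]) // xvda xvdb
}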
builder/amazon/common/ssh.go

@@ -16,14 +16,14 @@ func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (string, error) {
		for j := 0; j < 2; j++ {
			var host string
			i := state.Get("instance").(*ec2.Instance)
-			if i.DNSName != "" {
-				host = i.DNSName
-			} else if i.VpcId != "" {
+			if i.VpcId != "" {
				if i.PublicIpAddress != "" && !private {
					host = i.PublicIpAddress
				} else {
					host = i.PrivateIpAddress
				}
+			} else if i.DNSName != "" {
+				host = i.DNSName
			}

			if host != "" {
builder/digitalocean/artifact.go

@@ -3,6 +3,7 @@ package digitalocean
import (
	"fmt"
	"log"
+	"strconv"
)

type Artifact struct {

@@ -29,8 +30,7 @@ func (*Artifact) Files() []string {
}

func (a *Artifact) Id() string {
-	// mimicing the aws builder
-	return fmt.Sprintf("%s:%s", a.regionName, a.snapshotName)
+	return strconv.FormatUint(uint64(a.snapshotId), 10)
}

func (a *Artifact) String() string {
builder/digitalocean/artifact_test.go

@@ -1,8 +1,9 @@
package digitalocean

import (
-	"github.com/mitchellh/packer/packer"
	"testing"
+
+	"github.com/mitchellh/packer/packer"
)

func TestArtifact_Impl(t *testing.T) {

@@ -13,6 +14,15 @@ func TestArtifact_Impl(t *testing.T) {
	}
}

+func TestArtifactId(t *testing.T) {
+	a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
+	expected := "42"
+
+	if a.Id() != expected {
+		t.Fatalf("artifact ID should match: %v", expected)
+	}
+}
+
func TestArtifactString(t *testing.T) {
	a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
	expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
builder/digitalocean/builder.go

@@ -21,8 +21,8 @@ import (
const DefaultImage = "ubuntu-12-04-x64"

// see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key]
-// name="New York", id=1
-const DefaultRegion = "nyc1"
+// name="New York 3", id=8
+const DefaultRegion = "nyc3"

// see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key]
// name="512MB", id=66 (the smallest droplet size)
builder/docker/communicator.go

@@ -75,7 +75,7 @@ func (c *Communicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error {
	// Copy the file into place by copying the temporary file we put
	// into the shared folder into the proper location in the container
	cmd := &packer.RemoteCmd{
-		Command: fmt.Sprintf("cp %s/%s %s", c.ContainerDir,
+		Command: fmt.Sprintf("command cp %s/%s %s", c.ContainerDir,
			filepath.Base(tempfile.Name()), dst),
	}

@@ -166,7 +166,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error {

	// Make the directory, then copy into it
	cmd := &packer.RemoteCmd{
-		Command: fmt.Sprintf("set -e; mkdir -p %s; cp -R %s/* %s",
+		Command: fmt.Sprintf("set -e; mkdir -p %s; command cp -R %s/* %s",
			containerDst, containerSrc, containerDst),
	}
	if err := c.Start(cmd); err != nil {
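A note on the `command cp` changes above: `command` is a POSIX shell builtin that bypasses shell aliases and functions, so an image whose shell profile defines something like `alias cp='cp -i'` can no longer turn the copy into an interactive prompt. This is the "Fixed hang on prompt while copying script" entry in the changelog.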
builder/googlecompute/builder.go

@@ -49,6 +49,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {

	// Build the steps.
	steps := []multistep.Step{
+		new(StepCheckExistingImage),
		&StepCreateSSHKey{
			Debug:        b.config.PackerDebug,
			DebugKeyPath: fmt.Sprintf("gce_%s.pem", b.config.PackerBuildName),
builder/googlecompute/config.go

@@ -19,7 +19,6 @@ type Config struct {
	AccountFile string `mapstructure:"account_file"`
	ProjectId   string `mapstructure:"project_id"`

-	BucketName       string `mapstructure:"bucket_name"`
	DiskName         string `mapstructure:"disk_name"`
	DiskSizeGb       int64  `mapstructure:"disk_size"`
	ImageName        string `mapstructure:"image_name"`

@@ -109,7 +108,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
	templates := map[string]*string{
		"account_file": &c.AccountFile,

-		"bucket_name":       &c.BucketName,
		"disk_name":         &c.DiskName,
		"image_name":        &c.ImageName,
		"image_description": &c.ImageDescription,

@@ -135,11 +133,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
	}

	// Process required parameters.
-	if c.BucketName == "" {
-		errs = packer.MultiErrorAppend(
-			errs, errors.New("a bucket_name must be specified"))
-	}
-
	if c.ProjectId == "" {
		errs = packer.MultiErrorAppend(
			errs, errors.New("a project_id must be specified"))
builder/googlecompute/config_test.go

@@ -8,7 +8,6 @@ import (
func testConfig(t *testing.T) map[string]interface{} {
	return map[string]interface{}{
		"account_file": testAccountFile(t),
-		"bucket_name":  "foo",
		"project_id":   "hashicorp",
		"source_image": "foo",
		"zone":         "us-east-1a",

@@ -57,17 +56,6 @@ func TestConfigPrepare(t *testing.T) {
			true,
		},

-		{
-			"bucket_name",
-			nil,
-			true,
-		},
-		{
-			"bucket_name",
-			"good",
-			false,
-		},
-
		{
			"private_key_file",
			"/tmp/i/should/not/exist",
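The `bucket_name` removals here and in config.go are consistent with the changelog entry "GCE image from persistent disk instead of tarball": once the image is created directly from the persistent disk, no GCS bucket is needed to stage a tarball.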
builder/googlecompute/driver.go

@@ -4,6 +4,10 @@ package googlecompute
// with GCE. The Driver interface exists mostly to allow a mock implementation
// to be used to test the steps.
type Driver interface {
+	// ImageExists returns true if the specified image exists. If an error
+	// occurs calling the API, this method returns false.
+	ImageExists(name string) bool
+
	// CreateImage creates an image from the given disk in Google Compute
	// Engine.
	CreateImage(name, description, zone, disk string) <-chan error
builder/googlecompute/driver_gce.go

@@ -7,9 +7,13 @@ import (
	"time"

	"code.google.com/p/google-api-go-client/compute/v1"
-	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/google"
	"github.com/mitchellh/packer/packer"
+
+	// oauth2 "github.com/rasa/oauth2-fork-b3f9a68"
+	"github.com/rasa/oauth2-fork-b3f9a68"
+
+	// oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google"
+	"github.com/rasa/oauth2-fork-b3f9a68/google"
)

// driverGCE is a Driver implementation that actually talks to GCE.

@@ -60,6 +64,13 @@ func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
	}, nil
}

+func (d *driverGCE) ImageExists(name string) bool {
+	_, err := d.service.Images.Get(d.projectId, name).Do()
+	// The API may return an error for reasons other than the image not
+	// existing, but this heuristic is sufficient for now.
+	return err == nil
+}
+
func (d *driverGCE) CreateImage(name, description, zone, disk string) <-chan error {
	image := &compute.Image{
		Description: description,
builder/googlecompute/driver_mock.go

@@ -3,6 +3,9 @@ package googlecompute
// DriverMock is a Driver implementation that is mocked out so that
// it can be used for tests.
type DriverMock struct {
+	ImageExistsName   string
+	ImageExistsResult bool
+
	CreateImageName string
	CreateImageDesc string
	CreateImageZone string

@@ -37,6 +40,11 @@ type DriverMock struct {
	WaitForInstanceErrCh <-chan error
}

+func (d *DriverMock) ImageExists(name string) bool {
+	d.ImageExistsName = name
+	return d.ImageExistsResult
+}
+
func (d *DriverMock) CreateImage(name, description, zone, disk string) <-chan error {
	d.CreateImageName = name
	d.CreateImageDesc = description
builder/googlecompute/step_check_existing_image.go (new)

@@ -0,0 +1,33 @@
package googlecompute

import (
	"fmt"

	"github.com/mitchellh/multistep"
	"github.com/mitchellh/packer/packer"
)

// StepCheckExistingImage represents a Packer build step that checks if the
// target image already exists, and aborts immediately if so.
type StepCheckExistingImage int

// Run executes the Packer build step that checks if the image already exists.
func (s *StepCheckExistingImage) Run(state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Checking image does not exist...")
	exists := driver.ImageExists(config.ImageName)
	if exists {
		err := fmt.Errorf("Image %s already exists", config.ImageName)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

// Cleanup.
func (s *StepCheckExistingImage) Cleanup(state multistep.StateBag) {}
builder/googlecompute/step_check_existing_image_test.go (new)

@@ -0,0 +1,32 @@
package googlecompute

import (
	"github.com/mitchellh/multistep"
	"testing"
)

func TestStepCheckExistingImage_impl(t *testing.T) {
	var _ multistep.Step = new(StepCheckExistingImage)
}

func TestStepCheckExistingImage(t *testing.T) {
	state := testState(t)
	step := new(StepCheckExistingImage)
	defer step.Cleanup(state)

	state.Put("instance_name", "foo")

	config := state.Get("config").(*Config)
	driver := state.Get("driver").(*DriverMock)
	driver.ImageExistsResult = true

	// run the step
	if action := step.Run(state); action != multistep.ActionHalt {
		t.Fatalf("bad action: %#v", action)
	}

	// Verify state
	if driver.ImageExistsName != config.ImageName {
		t.Fatalf("bad: %#v", driver.ImageExistsName)
	}
}
builder/parallels/common/driver.go

@@ -55,6 +55,7 @@ func NewDriver() (Driver, error) {
	var drivers map[string]Driver
	var prlctlPath string
	var supportedVersions []string
+	dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"

	if runtime.GOOS != "darwin" {
		return nil, fmt.Errorf(

@@ -75,10 +76,12 @@ func NewDriver() (Driver, error) {
		"10": &Parallels10Driver{
			Parallels9Driver: Parallels9Driver{
				PrlctlPath: prlctlPath,
+				dhcp_lease_file: dhcp_lease_file,
			},
		},
		"9": &Parallels9Driver{
			PrlctlPath: prlctlPath,
+			dhcp_lease_file: dhcp_lease_file,
		},
	}

builder/parallels/common/driver_9.go

@@ -9,6 +9,7 @@ import (
	"os/exec"
	"path/filepath"
	"regexp"
+	"strconv"
	"strings"
	"time"

@@ -18,6 +19,8 @@ import (
type Parallels9Driver struct {
	// This is the path to the "prlctl" application.
	PrlctlPath string
+	// The path to the parallels_dhcp_leases file
+	dhcp_lease_file string
}

func (d *Parallels9Driver) Import(name, srcPath, dstDir string, reassignMac bool) error {
@@ -276,31 +279,43 @@ func (d *Parallels9Driver) Mac(vmName string) (string, error) {
}

// Finds the IP address of a VM connected that uses DHCP by its MAC address
+//
+// Parses the file /Library/Preferences/Parallels/parallels_dhcp_leases,
+// which contains a list of DHCP leases given by Parallels Desktop.
+// Example line:
+// 10.211.55.181="1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb"
+// IP Address   ="Lease expiry, Lease time, MAC, MAC or DUID"
func (d *Parallels9Driver) IpAddress(mac string) (string, error) {
-	var stdout bytes.Buffer
-	dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"
-
	if len(mac) != 12 {
		return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac)
	}

-	cmd := exec.Command("grep", "-i", mac, dhcp_lease_file)
-	cmd.Stdout = &stdout
-	if err := cmd.Run(); err != nil {
+	leases, err := ioutil.ReadFile(d.dhcp_lease_file)
+	if err != nil {
		return "", err
	}

-	stdoutString := strings.TrimSpace(stdout.String())
-	re := regexp.MustCompile("(.*)=.*")
-	ipMatch := re.FindAllStringSubmatch(stdoutString, 1)
-
-	if len(ipMatch) != 1 {
-		return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, dhcp_lease_file)
+	re := regexp.MustCompile("(.*)=\"(.*),(.*)," + strings.ToLower(mac) + ",.*\"")
+	mostRecentIp := ""
+	mostRecentLease := uint64(0)
+	for _, l := range re.FindAllStringSubmatch(string(leases), -1) {
+		ip := l[1]
+		expiry, _ := strconv.ParseUint(l[2], 10, 64)
+		leaseTime, _ := strconv.ParseUint(l[3], 10, 32)
+		log.Printf("Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\n", ip, mac, expiry, leaseTime)
+		if mostRecentLease <= expiry-leaseTime {
+			mostRecentIp = ip
+			mostRecentLease = expiry - leaseTime
+		}
	}

-	ip := ipMatch[0][1]
-	log.Printf("Found IP lease: %s for MAC address %s\n", ip, mac)
-	return ip, nil
+	if len(mostRecentIp) == 0 {
+		return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, d.dhcp_lease_file)
+	}
+
+	log.Printf("Found IP lease: %s for MAC address %s\n", mostRecentIp, mac)
+	return mostRecentIp, nil
}

func (d *Parallels9Driver) ToolsIsoPath(k string) (string, error) {
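The "most recent lease" selection above compares lease start times, computed as expiry minus lease time. Worked through the test data that follows for MAC 001c4235240c: 10.211.55.126 has expiry 1418288969 and lease time 1800, so it started at 1418288969 − 1800 = 1418287169, while 10.211.55.125 started at 1418288000 − 1800 = 1418286200. The larger start time wins, so .126 is returned, which is exactly what the first test case asserts.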
builder/parallels/common/driver_9_test.go

@@ -1,9 +1,60 @@
package common

import (
+	"io/ioutil"
+	"os"
	"testing"
)

func TestParallels9Driver_impl(t *testing.T) {
	var _ Driver = new(Parallels9Driver)
}
+
+func TestIpAddress(t *testing.T) {
+	tf, err := ioutil.TempFile("", "packer")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.Remove(tf.Name())
+
+	d := Parallels9Driver{
+		dhcp_lease_file: tf.Name(),
+	}
+
+	// No lease should be found in an empty file
+	ip, err := d.IpAddress("123456789012")
+	if err == nil {
+		t.Fatalf("Found IP: \"%v\". No IP should be found!\n", ip)
+	}
+
+	// The most recent lease, 10.211.55.126 should be found
+	c := []byte(`
+[vnic0]
+10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
+10.211.55.126="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
+10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
+`)
+	ioutil.WriteFile(tf.Name(), c, 0666)
+	ip, err = d.IpAddress("001C4235240c")
+	if err != nil {
+		t.Fatalf("Error: %v\n", err)
+	}
+	if ip != "10.211.55.126" {
+		t.Fatalf("Should have found 10.211.55.126, not %s!\n", ip)
+	}
+
+	// The most recent lease, 10.211.55.124 should be found
+	c = []byte(`[vnic0]
+10.211.55.124="1418288969,1800,001c4235240c,ff4235240c000100011c1c11ad001c4235240c"
+10.211.55.125="1418288000,1800,001c4235240c,ff4235240c000100011c1c10e7001c4235240c"
+10.211.55.254="1411712008,1800,001c42a51419,01001c42a51419"
+`)
+	ioutil.WriteFile(tf.Name(), c, 0666)
+	ip, err = d.IpAddress("001c4235240c")
+	if err != nil {
+		t.Fatalf("Error: %v\n", err)
+	}
+	if ip != "10.211.55.124" {
+		t.Fatalf("Should have found 10.211.55.124, not %s!\n", ip)
+	}
+}
builder/parallels/iso/builder.go

@@ -256,6 +256,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
		new(stepHTTPServer),
		new(stepCreateVM),
		new(stepCreateDisk),
+		new(stepSetBootOrder),
		new(stepAttachISO),
		&parallelscommon.StepAttachParallelsTools{
			ParallelsToolsMode: b.config.ParallelsToolsMode,
builder/parallels/iso/step_attach_iso.go

@@ -17,9 +17,8 @@ import (
//   vmName string
//
// Produces:
-type stepAttachISO struct {
-	cdromDevice string
-}
+//   attachedIso bool
+type stepAttachISO struct{}

func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(parallelscommon.Driver)

@@ -27,76 +26,42 @@ func (s *stepAttachISO) Run(state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

-	// Attach the disk to the controller
-	ui.Say("Attaching ISO to the new CD/DVD drive...")
-	cdrom, err := driver.DeviceAddCdRom(vmName, isoPath)
-
-	if err != nil {
+	// Attach the disk to the cdrom0 device. We couldn't use a separate
+	// device because it fails to boot in PD9 [GH-1667]
+	ui.Say("Attaching ISO to the default CD/DVD ROM device...")
+	command := []string{
+		"set", vmName,
+		"--device-set", "cdrom0",
+		"--image", isoPath,
+		"--enable", "--connect",
+	}
+	if err := driver.Prlctl(command...); err != nil {
		err := fmt.Errorf("Error attaching ISO: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

-	// Set new boot order
-	ui.Say("Setting the boot order...")
-	command := []string{
-		"set", vmName,
-		"--device-bootorder", fmt.Sprintf("hdd0 %s cdrom0 net0", cdrom),
-	}
-
-	if err := driver.Prlctl(command...); err != nil {
-		err := fmt.Errorf("Error setting the boot order: %s", err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-
-	// Disable 'cdrom0' device
-	ui.Say("Disabling default CD/DVD drive...")
-	command = []string{
-		"set", vmName,
-		"--device-set", "cdrom0", "--disable",
-	}
-
-	if err := driver.Prlctl(command...); err != nil {
-		err := fmt.Errorf("Error disabling default CD/DVD drive: %s", err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-
-	// Track the device name so that we can can delete later
-	s.cdromDevice = cdrom
+	// Set some state so we know to remove
+	state.Put("attachedIso", true)

	return multistep.ActionContinue
}

func (s *stepAttachISO) Cleanup(state multistep.StateBag) {
+	if _, ok := state.GetOk("attachedIso"); !ok {
+		return
+	}
+
	driver := state.Get("driver").(parallelscommon.Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

-	// Enable 'cdrom0' device back
-	log.Println("Enabling default CD/DVD drive...")
+	// Detach ISO by setting an empty string image.
+	log.Println("Detaching ISO from the default CD/DVD ROM device...")
	command := []string{
		"set", vmName,
-		"--device-set", "cdrom0", "--enable", "--disconnect",
-	}
-
-	if err := driver.Prlctl(command...); err != nil {
-		ui.Error(fmt.Sprintf("Error enabling default CD/DVD drive: %s", err))
-	}
-
-	// Detach ISO
-	if s.cdromDevice == "" {
-		return
-	}
-
-	log.Println("Detaching ISO...")
-	command = []string{
-		"set", vmName,
-		"--device-del", s.cdromDevice,
+		"--device-set", "cdrom0",
+		"--image", "", "--disconnect", "--enable",
	}

	if err := driver.Prlctl(command...); err != nil {
builder/parallels/iso/step_set_boot_order.go (new)

@@ -0,0 +1,42 @@
package iso

import (
	"fmt"
	"github.com/mitchellh/multistep"
	parallelscommon "github.com/mitchellh/packer/builder/parallels/common"
	"github.com/mitchellh/packer/packer"
)

// This step sets the device boot order for the virtual machine.
//
// Uses:
//   driver Driver
//   ui     packer.Ui
//   vmName string
//
// Produces:
type stepSetBootOrder struct{}

func (s *stepSetBootOrder) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(parallelscommon.Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	// Set new boot order
	ui.Say("Setting the boot order...")
	command := []string{
		"set", vmName,
		"--device-bootorder", fmt.Sprintf("hdd0 cdrom0 net0"),
	}

	if err := driver.Prlctl(command...); err != nil {
		err := fmt.Errorf("Error setting the boot order: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *stepSetBootOrder) Cleanup(state multistep.StateBag) {}
builder/virtualbox/common/driver.go

@@ -19,6 +19,9 @@ type Driver interface {
	// Create a SATA controller.
	CreateSATAController(vm string, controller string) error

+	// Create a SCSI controller.
+	CreateSCSIController(vm string, controller string) error
+
	// Delete a VM by name
	Delete(string) error

builder/virtualbox/common/driver_4_2.go

@@ -36,6 +36,18 @@ func (d *VBox42Driver) CreateSATAController(vmName string, name string) error {
	return d.VBoxManage(command...)
}

+func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
+
+	command := []string{
+		"storagectl", vmName,
+		"--name", name,
+		"--add", "scsi",
+		"--controller", "LSILogic",
+	}
+
+	return d.VBoxManage(command...)
+}
+
func (d *VBox42Driver) Delete(name string) error {
	return d.VBoxManage("unregistervm", name, "--delete")
}
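For reference, the command this helper composes is roughly what one would run by hand (the VM and controller names here are placeholders):

	VBoxManage storagectl "packer-vm" --name "SCSI Controller" --add scsi --controller LSILogic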
builder/virtualbox/common/driver_mock.go

@@ -9,6 +9,10 @@ type DriverMock struct {
	CreateSATAControllerController string
	CreateSATAControllerErr        error

+	CreateSCSIControllerVM         string
+	CreateSCSIControllerController string
+	CreateSCSIControllerErr        error
+
	DeleteCalled bool
	DeleteName   string
	DeleteErr    error

@@ -49,6 +53,12 @@ func (d *DriverMock) CreateSATAController(vm string, controller string) error {
	return d.CreateSATAControllerErr
}

+func (d *DriverMock) CreateSCSIController(vm string, controller string) error {
+	d.CreateSCSIControllerVM = vm
+	d.CreateSCSIControllerController = controller
+	return d.CreateSCSIControllerErr
+}
+
func (d *DriverMock) Delete(name string) error {
	d.DeleteCalled = true
	d.DeleteName = name
builder/virtualbox/iso/builder.go

@@ -158,9 +158,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
		}
	}

-	if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" {
+	if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" {
		errs = packer.MultiErrorAppend(
-			errs, errors.New("hard_drive_interface can only be ide or sata"))
+			errs, errors.New("hard_drive_interface can only be ide, sata, or scsi"))
	}

	if b.config.ISOChecksumType == "" {
builder/virtualbox/iso/step_create_disk.go

@@ -63,12 +63,25 @@ func (s *stepCreateDisk) Run(state multistep.StateBag) multistep.StepAction {
		}
	}

+	if config.HardDriveInterface == "scsi" {
+		if err := driver.CreateSCSIController(vmName, "SCSI Controller"); err != nil {
+			err := fmt.Errorf("Error creating disk controller: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+	}
+
	// Attach the disk to the controller
	controllerName := "IDE Controller"
	if config.HardDriveInterface == "sata" {
		controllerName = "SATA Controller"
	}

+	if config.HardDriveInterface == "scsi" {
+		controllerName = "SCSI Controller"
+	}
+
	command = []string{
		"storageattach", vmName,
		"--storagectl", controllerName,
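With the validation and controller wiring above in place, a template opts into the new controller through the existing `hard_drive_interface` key; a minimal sketch (other required builder fields omitted, values hypothetical):

{
  "builders": [{
    "type": "virtualbox-iso",
    "hard_drive_interface": "scsi"
  }]
}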
builder/vmware/iso/builder.go

@@ -368,6 +368,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
			SkipFloppy: true,
		},
		&vmwcommon.StepCleanVMX{},
+		&StepUploadVMX{
+			RemoteType: b.config.RemoteType,
+		},
		&vmwcommon.StepCompactDisk{
			Skip: b.config.SkipCompaction,
		},
builder/vmware/iso/driver_esx5.go

@@ -56,6 +56,10 @@ func (d *ESX5Driver) IsRunning(string) (bool, error) {
	return strings.Contains(state, "Powered on"), nil
}

+func (d *ESX5Driver) ReloadVM() error {
+	return d.sh("vim-cmd", "vmsvc/reload", d.vmId)
+}
+
func (d *ESX5Driver) Start(vmxPathLocal string, headless bool) error {
	for i := 0; i < 20; i++ {
		err := d.sh("vim-cmd", "vmsvc/power.on", d.vmId)
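`ReloadVM` above runs `vim-cmd vmsvc/reload <vmid>` on the ESXi host (via the driver's `sh` helper, which presumably executes over the established SSH session), prompting the host to re-read the VMX that StepUploadVMX just replaced.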
builder/vmware/iso/remote_driver.go

@@ -17,4 +17,10 @@ type RemoteDriver interface {

	// Removes a VM from inventory specified by the path to the VMX given.
	Unregister(string) error
+
+	// Uploads a local file to remote side.
+	upload(dst, src string) error
+
+	// Reload VM on remote side.
+	ReloadVM() error
}
builder/vmware/iso/remote_driver_mock.go

@@ -19,6 +19,10 @@ type RemoteDriverMock struct {
	UnregisterCalled bool
	UnregisterPath   string
	UnregisterErr    error

+	uploadErr error
+
+	ReloadVMErr error
}

func (d *RemoteDriverMock) UploadISO(path string, checksum string, checksumType string) (string, error) {

@@ -38,3 +42,11 @@ func (d *RemoteDriverMock) Unregister(path string) error {
	d.UnregisterPath = path
	return d.UnregisterErr
}
+
+func (d *RemoteDriverMock) upload(dst, src string) error {
+	return d.uploadErr
+}
+
+func (d *RemoteDriverMock) ReloadVM() error {
+	return d.ReloadVMErr
+}
builder/vmware/iso/step_upload_vmx.go (new)

@@ -0,0 +1,48 @@
package iso

import (
	"fmt"
	"github.com/mitchellh/multistep"
	"github.com/mitchellh/packer/packer"
	vmwcommon "github.com/mitchellh/packer/builder/vmware/common"
	"path/filepath"
)

// This step uploads the VMX to the remote host
//
// Uses:
//   driver   Driver
//   ui       packer.Ui
//   vmx_path string
//
// Produces:
//   <nothing>
type StepUploadVMX struct {
	RemoteType string
}

func (c *StepUploadVMX) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(vmwcommon.Driver)

	ui := state.Get("ui").(packer.Ui)
	vmxPath := state.Get("vmx_path").(string)

	if c.RemoteType == "esx5" {
		// Only act when we actually have a remote driver; calling
		// ReloadVM through a nil interface would panic.
		if remoteDriver, ok := driver.(RemoteDriver); ok {
			remoteVmxPath := filepath.ToSlash(filepath.Join(fmt.Sprintf("%s", remoteDriver), filepath.Base(vmxPath)))
			if err := remoteDriver.upload(remoteVmxPath, vmxPath); err != nil {
				state.Put("error", fmt.Errorf("Error writing VMX: %s", err))
				return multistep.ActionHalt
			}

			if err := remoteDriver.ReloadVM(); err != nil {
				ui.Error(fmt.Sprintf("Error reloading VM: %s", err))
			}
		}
	}

	return multistep.ActionContinue
}

func (StepUploadVMX) Cleanup(multistep.StateBag) {}
builder/vmware/iso/builder.go

@@ -12,7 +12,7 @@ import (
	"github.com/mitchellh/packer/packer"
)

-// Builder implements packer.Builder and builds the actual VirtualBox
+// Builder implements packer.Builder and builds the actual VMware
// images.
type Builder struct {
	config *Config
command/command_test.go (new)

@@ -0,0 +1,28 @@
package command

import (
	"path/filepath"
	"testing"

	"github.com/mitchellh/cli"
)

const fixturesDir = "./test-fixtures"

func fatalCommand(t *testing.T, m Meta) {
	ui := m.Ui.(*cli.MockUi)
	t.Fatalf(
		"Bad exit code.\n\nStdout:\n\n%s\n\nStderr:\n\n%s",
		ui.OutputWriter.String(),
		ui.ErrorWriter.String())
}

func testFixture(n string) string {
	return filepath.Join(fixturesDir, n)
}

func testMeta(t *testing.T) Meta {
	return Meta{
		Ui: new(cli.MockUi),
	}
}
command/push.go (new)

@@ -0,0 +1,331 @@
package command

import (
	"flag"
	"fmt"
	"io"
	"os"
	"os/signal"
	"path/filepath"
	"strings"

	"github.com/hashicorp/atlas-go/archive"
	"github.com/hashicorp/atlas-go/v1"
	"github.com/mitchellh/packer/packer"
)

// archiveTemplateEntry is the name the template always takes within the slug.
const archiveTemplateEntry = ".packer-template"

type PushCommand struct {
	Meta

	client *atlas.Client

	// For tests:
	uploadFn pushUploadFn
}

// pushUploadFn is the callback type used for tests to stub out the uploading
// logic of the push command.
type pushUploadFn func(
	io.Reader, *uploadOpts) (<-chan struct{}, <-chan error, error)

func (c *PushCommand) Run(args []string) int {
	var create bool
	var token string

	f := flag.NewFlagSet("push", flag.ContinueOnError)
	f.Usage = func() { c.Ui.Error(c.Help()) }
	f.BoolVar(&create, "create", false, "create")
	f.StringVar(&token, "token", "", "token")
	if err := f.Parse(args); err != nil {
		return 1
	}

	args = f.Args()
	if len(args) != 1 {
		f.Usage()
		return 1
	}

	// Read the template
	tpl, err := packer.ParseTemplateFile(args[0], nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
		return 1
	}

	// Validate some things
	if tpl.Push.Name == "" {
		c.Ui.Error(fmt.Sprintf(
			"The 'push' section must be specified in the template with\n" +
				"at least the 'name' option set."))
		return 1
	}

	// Determine our token
	if token == "" {
		token = tpl.Push.Token
	}

	// Build our client
	defer func() { c.client = nil }()
	c.client = atlas.DefaultClient()
	if tpl.Push.Address != "" {
		c.client, err = atlas.NewClient(tpl.Push.Address)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error setting up API client: %s", err))
			return 1
		}
	}
	if token != "" {
		c.client.Token = token
	}

	// Build the archiving options
	var opts archive.ArchiveOpts
	opts.Include = tpl.Push.Include
	opts.Exclude = tpl.Push.Exclude
	opts.VCS = tpl.Push.VCS
	opts.Extra = map[string]string{
		archiveTemplateEntry: args[0],
	}

	// Determine the path we're archiving. This logic is a bit complicated
	// as there are three possibilities:
	//
	//   1.) BaseDir is an absolute path, just use that.
	//
	//   2.) BaseDir is empty, so we use the directory of the template.
	//
	//   3.) BaseDir is relative, so we use the path relative to the directory
	//       of the template.
	//
	path := tpl.Push.BaseDir
	if path == "" || !filepath.IsAbs(path) {
		tplPath, err := filepath.Abs(args[0])
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
			return 1
		}
		tplPath = filepath.Dir(tplPath)
		if path != "" {
			tplPath = filepath.Join(tplPath, path)
		}
		path, err = filepath.Abs(tplPath)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
			return 1
		}
	}

	// Find the Atlas post-processors, if possible
	var atlasPPs []packer.RawPostProcessorConfig
	for _, list := range tpl.PostProcessors {
		for _, pp := range list {
			if pp.Type == "atlas" {
				atlasPPs = append(atlasPPs, pp)
			}
		}
	}

	// Build the upload options
	var uploadOpts uploadOpts
	uploadOpts.Slug = tpl.Push.Name
	uploadOpts.Builds = make(map[string]*uploadBuildInfo)
	for _, b := range tpl.Builders {
		info := &uploadBuildInfo{Type: b.Type}

		// Determine if we're artifacting this build
		for _, pp := range atlasPPs {
			if !pp.Skip(b.Name) {
				info.Artifact = true
				break
			}
		}

		uploadOpts.Builds[b.Name] = info
	}

	// Warn about builds not having post-processors.
	var badBuilds []string
	for name, b := range uploadOpts.Builds {
		if b.Artifact {
			continue
		}

		badBuilds = append(badBuilds, name)
	}
	if len(badBuilds) > 0 {
		c.Ui.Error(fmt.Sprintf(
			"Warning! One or more of the builds in this template does not\n"+
				"have an Atlas post-processor. Artifacts from this template will\n"+
				"not appear in the Atlas artifact registry.\n\n"+
				"This is just a warning. Atlas will still build your template\n"+
				"and assume other post-processors are sending the artifacts where\n"+
				"they need to go.\n\n"+
				"Builds: %s\n\n", strings.Join(badBuilds, ", ")))
	}

	// Create the build config if it doesn't currently exist.
	if err := c.create(uploadOpts.Slug, create); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Start the archiving process
	r, err := archive.CreateArchive(path, &opts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error archiving: %s", err))
		return 1
	}
	defer r.Close()

	// Start the upload process
	doneCh, uploadErrCh, err := c.upload(r, &uploadOpts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err))
		return 1
	}

	// Make a ctrl-C channel
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	defer signal.Stop(sigCh)

	err = nil
	select {
	case err = <-uploadErrCh:
		err = fmt.Errorf("Error uploading: %s", err)
	case <-sigCh:
		err = fmt.Errorf("Push cancelled from Ctrl-C")
	case <-doneCh:
	}

	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	c.Ui.Output(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name))
	return 0
}

func (*PushCommand) Help() string {
	helpText := `
Usage: packer push [options] TEMPLATE

  Push the template and the files it needs to a Packer build service.
  This will not initiate any builds, it will only update the templates
  used for builds.

  The configuration about what is pushed is configured within the
  template's "push" section.

Options:

  -create             Create the build configuration if it doesn't exist.

  -token=<token>      Access token to use to upload. If blank, the
                      ATLAS_TOKEN environmental variable will be used.
`

	return strings.TrimSpace(helpText)
}

func (*PushCommand) Synopsis() string {
	return "push template files to a Packer build service"
}

func (c *PushCommand) create(name string, create bool) error {
	if c.uploadFn != nil {
		return nil
	}

	// Separate the slug into the user and name components
	user, name, err := atlas.ParseSlug(name)
	if err != nil {
		return fmt.Errorf("Malformed push name: %s", err)
	}

	// Check if it exists. If so, we're done.
	if _, err := c.client.BuildConfig(user, name); err == nil {
		return nil
	} else if err != atlas.ErrNotFound {
		return err
	}

	// Otherwise, show an error if we're not creating.
	if !create {
		return fmt.Errorf(
			"Push target doesn't exist: %s. Either create this online via\n"+
				"the website or pass the -create flag.", name)
	}

	// Create it
	if err := c.client.CreateBuildConfig(user, name); err != nil {
		return err
	}

	return nil
}

func (c *PushCommand) upload(
	r *archive.Archive, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
	if c.uploadFn != nil {
		return c.uploadFn(r, opts)
	}

	// Separate the slug into the user and name components
	user, name, err := atlas.ParseSlug(opts.Slug)
	if err != nil {
		return nil, nil, fmt.Errorf("upload: %s", err)
	}

	// Get the app
	bc, err := c.client.BuildConfig(user, name)
	if err != nil {
		return nil, nil, fmt.Errorf("upload: %s", err)
	}

	// Build the version to send up
	version := atlas.BuildConfigVersion{
		User:   bc.User,
		Name:   bc.Name,
		Builds: make([]atlas.BuildConfigBuild, 0, len(opts.Builds)),
	}
	for name, info := range opts.Builds {
		version.Builds = append(version.Builds, atlas.BuildConfigBuild{
			Name:     name,
			Type:     info.Type,
			Artifact: info.Artifact,
		})
	}

	// Start the upload
	doneCh, errCh := make(chan struct{}), make(chan error)
	go func() {
		err := c.client.UploadBuildConfigVersion(&version, r, r.Size)
		if err != nil {
			errCh <- err
			return
		}

		close(doneCh)
	}()

	return doneCh, errCh, nil
}

type uploadOpts struct {
	URL    string
	Slug   string
	Builds map[string]*uploadBuildInfo
}

type uploadBuildInfo struct {
	Type     string
	Artifact bool
}
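Taken together with the help text above, a typical invocation would be something like:

	packer push -create template.json

with the access token supplied either via `-token=...` or the `ATLAS_TOKEN` environment variable.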
command/push_test.go (new)

@@ -0,0 +1,186 @@
package command

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"path/filepath"
	"reflect"
	"sort"
	"testing"
)

func TestPush_noArgs(t *testing.T) {
	c := &PushCommand{Meta: testMeta(t)}
	code := c.Run(nil)
	if code != 1 {
		t.Fatalf("bad: %#v", code)
	}
}

func TestPush_multiArgs(t *testing.T) {
	c := &PushCommand{Meta: testMeta(t)}
	code := c.Run([]string{"one", "two"})
	if code != 1 {
		t.Fatalf("bad: %#v", code)
	}
}

func TestPush(t *testing.T) {
	var actual []string
	var actualOpts *uploadOpts
	uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
		actual = testArchive(t, r)
		actualOpts = opts

		doneCh := make(chan struct{})
		close(doneCh)
		return doneCh, nil, nil
	}

	c := &PushCommand{
		Meta:     testMeta(t),
		uploadFn: uploadFn,
	}

	args := []string{filepath.Join(testFixture("push"), "template.json")}
	if code := c.Run(args); code != 0 {
		fatalCommand(t, c.Meta)
	}

	expected := []string{
		archiveTemplateEntry,
		"template.json",
	}

	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}

	expectedBuilds := map[string]*uploadBuildInfo{
		"dummy": &uploadBuildInfo{
			Type: "dummy",
		},
	}
	if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
		t.Fatalf("bad: %#v", actualOpts.Builds)
	}
}

func TestPush_builds(t *testing.T) {
	var actualOpts *uploadOpts
	uploadFn := func(
		r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
		actualOpts = opts

		doneCh := make(chan struct{})
		close(doneCh)
		return doneCh, nil, nil
	}

	c := &PushCommand{
		Meta:     testMeta(t),
		uploadFn: uploadFn,
	}

	args := []string{filepath.Join(testFixture("push-builds"), "template.json")}
	if code := c.Run(args); code != 0 {
		fatalCommand(t, c.Meta)
	}

	expectedBuilds := map[string]*uploadBuildInfo{
		"dummy": &uploadBuildInfo{
			Type:     "dummy",
			Artifact: true,
		},
		"foo": &uploadBuildInfo{
			Type: "dummy",
		},
	}
	if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
		t.Fatalf("bad: %#v", actualOpts.Builds)
	}
}

func TestPush_noName(t *testing.T) {
	uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
		return nil, nil, nil
	}

	c := &PushCommand{
		Meta:     testMeta(t),
		uploadFn: uploadFn,
	}

	args := []string{filepath.Join(testFixture("push-no-name"), "template.json")}
	if code := c.Run(args); code != 1 {
		fatalCommand(t, c.Meta)
	}
}

func TestPush_uploadError(t *testing.T) {
	uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
		return nil, nil, fmt.Errorf("bad")
	}

	c := &PushCommand{
		Meta:     testMeta(t),
		uploadFn: uploadFn,
	}

	args := []string{filepath.Join(testFixture("push"), "template.json")}
	if code := c.Run(args); code != 1 {
		fatalCommand(t, c.Meta)
	}
}

func TestPush_uploadErrorCh(t *testing.T) {
	uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
		errCh := make(chan error, 1)
		errCh <- fmt.Errorf("bad")
		return nil, errCh, nil
	}

	c := &PushCommand{
		Meta:     testMeta(t),
		uploadFn: uploadFn,
	}

	args := []string{filepath.Join(testFixture("push"), "template.json")}
	if code := c.Run(args); code != 1 {
		fatalCommand(t, c.Meta)
	}
}

func testArchive(t *testing.T, r io.Reader) []string {
	// Finish the archiving process in-memory
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		t.Fatalf("err: %s", err)
	}

	gzipR, err := gzip.NewReader(&buf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	tarR := tar.NewReader(gzipR)

	// Read all the entries
	result := make([]string, 0, 5)
	for {
		hdr, err := tarR.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		result = append(result, hdr.Name)
	}

	sort.Strings(result)
	return result
}
command/test-fixtures/push-builds/template.json (new)

@@ -0,0 +1,15 @@
{
    "builders": [
        {"type": "dummy"},
        {"type": "dummy", "name": "foo"}
    ],

    "post-processors": [{
        "type": "atlas",
        "only": ["dummy"]
    }],

    "push": {
        "name": "foo/bar"
    }
}
command/test-fixtures/push-no-name/template.json (new)

@@ -0,0 +1,3 @@
{
    "builders": [{"type": "dummy"}]
}
command/test-fixtures/push/template.json (new)

@@ -0,0 +1,7 @@
{
    "builders": [{"type": "dummy"}],

    "push": {
        "name": "foo/bar"
    }
}
commands.go

@@ -50,6 +50,12 @@ func init() {
		}, nil
	},

+	"push": func() (cli.Command, error) {
+		return &command.PushCommand{
+			Meta: meta,
+		}, nil
+	},
+
	"validate": func() (cli.Command, error) {
		return &command.ValidateCommand{
			Meta: meta,
communicator/ssh/communicator.go

@@ -336,7 +336,7 @@ func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error {
	var mode os.FileMode
	var size int64

-	if fi != nil {
+	if fi != nil && (*fi).Mode().IsRegular() {
		mode = (*fi).Mode().Perm()
		size = (*fi).Size()
	} else {
main.go (1 change)

@@ -166,6 +166,7 @@ func wrappedMain() int {
		Commands:   Commands,
		HelpFunc:   cli.BasicHelpFunc("packer"),
		HelpWriter: os.Stdout,
+		Version:    Version,
	}

	exitCode, err := cli.Run()
packer/template.go

@@ -24,6 +24,7 @@ type rawTemplate struct {
	Description    string
	Builders       []map[string]interface{}
	Hooks          map[string][]string
+	Push           PushConfig
	PostProcessors []interface{} `mapstructure:"post-processors"`
	Provisioners   []map[string]interface{}
	Variables      map[string]interface{}

@@ -36,10 +37,22 @@ type Template struct {
	Variables      map[string]RawVariable
	Builders       map[string]RawBuilderConfig
	Hooks          map[string][]string
+	Push           *PushConfig
	PostProcessors [][]RawPostProcessorConfig
	Provisioners   []RawProvisionerConfig
}

+// PushConfig is the configuration structure for the push settings.
+type PushConfig struct {
+	Name    string
+	Address string
+	BaseDir string `mapstructure:"base_dir"`
+	Include []string
+	Exclude []string
+	Token   string
+	VCS     bool
+}
+
// The RawBuilderConfig struct represents a raw, unprocessed builder
// configuration. It contains the name of the builder as well as the
// raw configuration. If requested, this is used to compile into a full

@@ -154,6 +167,7 @@ func ParseTemplate(data []byte, vars map[string]string) (t *Template, err error) {
	t.Variables = make(map[string]RawVariable)
	t.Builders = make(map[string]RawBuilderConfig)
	t.Hooks = rawTpl.Hooks
+	t.Push = &rawTpl.Push
	t.PostProcessors = make([][]RawPostProcessorConfig, len(rawTpl.PostProcessors))
	t.Provisioners = make([]RawProvisionerConfig, len(rawTpl.Provisioners))

packer/template_test.go

@@ -541,6 +541,41 @@ func TestParseTemplate_ProvisionerPauseBefore(t *testing.T) {
	}
}

+func TestParseTemplateFile_push(t *testing.T) {
+	data := `
+	{
+		"builders": [{"type": "something"}],
+
+		"push": {
+			"name": "hello",
+			"include": ["one"],
+			"exclude": ["two"]
+		}
+	}
+	`
+
+	tf, err := ioutil.TempFile("", "packer")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	tf.Write([]byte(data))
+	tf.Close()
+
+	result, err := ParseTemplateFile(tf.Name(), nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := &PushConfig{
+		Name:    "hello",
+		Include: []string{"one"},
+		Exclude: []string{"two"},
+	}
+	if !reflect.DeepEqual(result.Push, expected) {
+		t.Fatalf("bad: %#v", result.Push)
+	}
+}
+
func TestParseTemplate_Variables(t *testing.T) {
	data := `
	{
plugin/post-processor-atlas/main.go (new)

@@ -0,0 +1,15 @@
package main

import (
	"github.com/mitchellh/packer/packer/plugin"
	"github.com/mitchellh/packer/post-processor/atlas"
)

func main() {
	server, err := plugin.Server()
	if err != nil {
		panic(err)
	}
	server.RegisterPostProcessor(new(atlas.PostProcessor))
	server.Serve()
}
@@ -0,0 +1 @@
package main
post-processor/atlas/artifact.go (new)

@@ -0,0 +1,37 @@
package atlas

import (
	"fmt"
)

const BuilderId = "packer.post-processor.atlas"

type Artifact struct {
	Name    string
	Type    string
	Version int
}

func (*Artifact) BuilderId() string {
	return BuilderId
}

func (a *Artifact) Files() []string {
	return nil
}

func (a *Artifact) Id() string {
	return fmt.Sprintf("%s/%s/%d", a.Name, a.Type, a.Version)
}

func (a *Artifact) String() string {
	return fmt.Sprintf("%s/%s (v%d)", a.Name, a.Type, a.Version)
}

func (*Artifact) State(name string) interface{} {
	return nil
}

func (a *Artifact) Destroy() error {
	return nil
}
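The post-processor that follows requires the `artifact` slug and `artifact_type` settings; a minimal sketch of its template stanza, with hypothetical values:

{
  "post-processors": [{
    "type": "atlas",
    "artifact": "hashicorp/example",
    "artifact_type": "amazon.ami"
  }]
}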
@ -0,0 +1,258 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/atlas-go/archive"
|
||||
"github.com/hashicorp/atlas-go/v1"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
const BuildEnvKey = "ATLAS_BUILD_ID"
|
||||
|
||||
// Artifacts can return a string for this state key and the post-processor
|
||||
// will use automatically use this as the type. The user's value overrides
|
||||
// this if `artifact_type_override` is set to true.
|
||||
const ArtifactStateType = "atlas.artifact.type"
|
||||
|
||||
// Artifacts can return a map[string]string for this state key and this
|
||||
// post-processor will automatically merge it into the metadata for any
|
||||
// uploaded artifact versions.
const ArtifactStateMetadata = "atlas.artifact.metadata"

type Config struct {
    common.PackerConfig `mapstructure:",squash"`

    Artifact     string
    Type         string `mapstructure:"artifact_type"`
    TypeOverride bool   `mapstructure:"artifact_type_override"`
    Metadata     map[string]string

    ServerAddr string `mapstructure:"server_address"`
    Token      string

    // This shouldn't ever be set outside of unit tests.
    Test bool `mapstructure:"test"`

    tpl        *packer.ConfigTemplate
    user, name string
    buildId    int
}

type PostProcessor struct {
    config Config
    client *atlas.Client
}

func (p *PostProcessor) Configure(raws ...interface{}) error {
    _, err := common.DecodeConfig(&p.config, raws...)
    if err != nil {
        return err
    }

    p.config.tpl, err = packer.NewConfigTemplate()
    if err != nil {
        return err
    }
    p.config.tpl.UserVars = p.config.PackerUserVars

    templates := map[string]*string{
        "artifact":       &p.config.Artifact,
        "type":           &p.config.Type,
        "server_address": &p.config.ServerAddr,
        "token":          &p.config.Token,
    }

    errs := new(packer.MultiError)
    for key, ptr := range templates {
        *ptr, err = p.config.tpl.Process(*ptr, nil)
        if err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error processing %s: %s", key, err))
        }
    }

    required := map[string]*string{
        "artifact":      &p.config.Artifact,
        "artifact_type": &p.config.Type,
    }

    for key, ptr := range required {
        if *ptr == "" {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("%s must be set", key))
        }
    }

    if len(errs.Errors) > 0 {
        return errs
    }

    p.config.user, p.config.name, err = atlas.ParseSlug(p.config.Artifact)
    if err != nil {
        return err
    }

    // If we have a build ID, save it
    if v := os.Getenv(BuildEnvKey); v != "" {
        raw, err := strconv.ParseInt(v, 0, 0)
        if err != nil {
            return fmt.Errorf(
                "Error parsing build ID: %s", err)
        }

        p.config.buildId = int(raw)
    }

    // Build the client
    p.client = atlas.DefaultClient()
    if p.config.ServerAddr != "" {
        p.client, err = atlas.NewClient(p.config.ServerAddr)
        if err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error initializing client: %s", err))
            return errs
        }
    }
    if p.config.Token != "" {
        p.client.Token = p.config.Token
    }

    if !p.config.Test {
        // Verify the client
        if err := p.client.Verify(); err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error initializing client: %s", err))
            return errs
        }
    }

    return nil
}

func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
    if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil {
        if err != atlas.ErrNotFound {
            return nil, false, fmt.Errorf(
                "Error finding artifact: %s", err)
        }

        // Artifact doesn't exist, create it
        ui.Message(fmt.Sprintf("Creating artifact: %s", p.config.Artifact))
        _, err = p.client.CreateArtifact(p.config.user, p.config.name)
        if err != nil {
            return nil, false, fmt.Errorf(
                "Error creating artifact: %s", err)
        }
    }

    opts := &atlas.UploadArtifactOpts{
        User:     p.config.user,
        Name:     p.config.name,
        Type:     p.config.Type,
        ID:       artifact.Id(),
        Metadata: p.metadata(artifact),
        BuildID:  p.config.buildId,
    }

    if fs := artifact.Files(); len(fs) > 0 {
        var archiveOpts archive.ArchiveOpts

        // We have files. We want to compress/upload them. If we have just
        // one file, then we use it as-is. Otherwise, we compress all of
        // them into a single file.
        var path string
        if len(fs) == 1 {
            path = fs[0]
        } else {
            path = longestCommonPrefix(fs)
            if path == "" {
                return nil, false, fmt.Errorf(
                    "No common prefix for archiving files: %v", fs)
            }

            // Modify the archive options to only include the files
            // that are in our file list.
            include := make([]string, len(fs))
            for i, f := range fs {
                include[i] = strings.Replace(f, path, "", 1)
            }
            archiveOpts.Include = include
        }

        r, err := archive.CreateArchive(path, &archiveOpts)
        if err != nil {
            return nil, false, fmt.Errorf(
                "Error archiving artifact: %s", err)
        }
        defer r.Close()

        opts.File = r
        opts.FileSize = r.Size
    }

    ui.Message("Uploading artifact version...")
    var av *atlas.ArtifactVersion
    doneCh := make(chan struct{})
    errCh := make(chan error, 1)
    go func() {
        var err error
        av, err = p.client.UploadArtifact(opts)
        if err != nil {
            errCh <- err
            return
        }
        close(doneCh)
    }()

    select {
    case err := <-errCh:
        return nil, false, fmt.Errorf("Error uploading: %s", err)
    case <-doneCh:
    }

    return &Artifact{
        Name:    p.config.Artifact,
        Type:    p.config.Type,
        Version: av.Version,
    }, true, nil
}

func (p *PostProcessor) metadata(artifact packer.Artifact) map[string]string {
    var metadata map[string]string
    metadataRaw := artifact.State(ArtifactStateMetadata)
    if metadataRaw != nil {
        if err := mapstructure.Decode(metadataRaw, &metadata); err != nil {
            panic(err)
        }
    }

    if p.config.Metadata != nil {
        // If we have no extra metadata, just return as-is
        if metadata == nil {
            return p.config.Metadata
        }

        // Merge the metadata
        for k, v := range p.config.Metadata {
            metadata[k] = v
        }
    }

    return metadata
}

func (p *PostProcessor) artifactType(artifact packer.Artifact) string {
    if !p.config.TypeOverride {
        if v := artifact.State(ArtifactStateType); v != nil {
            return v.(string)
        }
    }

    return p.config.Type
}

@@ -0,0 +1,136 @@
package atlas

import (
    "os"
    "reflect"
    "testing"

    "github.com/mitchellh/packer/packer"
)

func TestPostProcessorConfigure(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    if p.client == nil {
        t.Fatal("should have client")
    }
    if p.client.Token != "" {
        t.Fatal("should not have token")
    }
}

func TestPostProcessorConfigure_buildId(t *testing.T) {
    defer os.Setenv(BuildEnvKey, os.Getenv(BuildEnvKey))
    os.Setenv(BuildEnvKey, "5")

    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    if p.config.buildId != 5 {
        t.Fatalf("bad: %#v", p.config.buildId)
    }
}

func TestPostProcessorMetadata(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    metadata := p.metadata(artifact)
    if len(metadata) > 0 {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorMetadata_artifact(t *testing.T) {
    config := validDefaults()
    config["metadata"] = map[string]string{
        "foo": "bar",
    }

    var p PostProcessor
    if err := p.Configure(config); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    artifact.StateValues = map[string]interface{}{
        ArtifactStateMetadata: map[interface{}]interface{}{
            "bar": "baz",
        },
    }

    metadata := p.metadata(artifact)
    expected := map[string]string{
        "foo": "bar",
        "bar": "baz",
    }
    if !reflect.DeepEqual(metadata, expected) {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorMetadata_config(t *testing.T) {
    config := validDefaults()
    config["metadata"] = map[string]string{
        "foo": "bar",
    }

    var p PostProcessor
    if err := p.Configure(config); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    metadata := p.metadata(artifact)
    expected := map[string]string{
        "foo": "bar",
    }
    if !reflect.DeepEqual(metadata, expected) {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorType(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    actual := p.artifactType(artifact)
    if actual != "foo" {
        t.Fatalf("bad: %#v", actual)
    }
}

func TestPostProcessorType_artifact(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    artifact.StateValues = map[string]interface{}{
        ArtifactStateType: "bar",
    }
    actual := p.artifactType(artifact)
    if actual != "bar" {
        t.Fatalf("bad: %#v", actual)
    }
}

func validDefaults() map[string]interface{} {
    return map[string]interface{}{
        "artifact":      "mitchellh/test",
        "artifact_type": "foo",
        "test":          true,
    }
}

@@ -0,0 +1,48 @@
package atlas

import (
    "math"
    "strings"
)

// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
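//
// For example, longestCommonPrefix([]string{"foo/", "foo/bar"}) returns
// "foo/", while longestCommonPrefix([]string{"foo", "foobar"}) returns ""
// because only '/'-terminated prefixes are considered (see the tests below).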
func longestCommonPrefix(vs []string) string {
    var length int64
    // Find the shortest string
    var shortest string
    length = math.MaxUint32
    for _, v := range vs {
        if int64(len(v)) < length {
            shortest = v
            length = int64(len(v))
        }
    }

    // Now go through and find a prefix to all the strings using this
    // short string, which itself must contain the prefix.
    for i := len(shortest); i > 0; i-- {
        // We only care about prefixes with path seps
        if shortest[i-1] != '/' {
            continue
        }

        bad := false
        prefix := shortest[0:i]
        for _, v := range vs {
            if !strings.HasPrefix(v, prefix) {
                bad = true
                break
            }
        }

        if !bad {
            return prefix
        }
    }

    return ""
}

@@ -0,0 +1,36 @@
package atlas

import (
    "testing"
)

func TestLongestCommonPrefix(t *testing.T) {
    cases := []struct {
        Input  []string
        Output string
    }{
        {
            []string{"foo", "bar"},
            "",
        },
        {
            []string{"foo", "foobar"},
            "",
        },
        {
            []string{"foo/", "foo/bar"},
            "foo/",
        },
        {
            []string{"/foo/", "/bar"},
            "/",
        },
    }

    for _, tc := range cases {
        actual := longestCommonPrefix(tc.Input)
        if actual != tc.Output {
            t.Fatalf("bad: %#v\n\n%#v", actual, tc.Input)
        }
    }
}

@@ -25,7 +25,7 @@ func (s *stepCreateProvider) Run(state multistep.StateBag) multistep.StepAction
    providerName := state.Get("providerName").(string)
    downloadUrl := state.Get("boxDownloadUrl").(string)

    path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Number)
    path := fmt.Sprintf("box/%s/version/%v/providers", box.Tag, version.Version)

    provider := &Provider{Name: providerName}

@@ -86,7 +86,7 @@ func (s *stepCreateProvider) Cleanup(state multistep.StateBag) {
    ui.Say("Cleaning up provider")
    ui.Message(fmt.Sprintf("Deleting provider: %s", s.name))

    path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, s.name)
    path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, s.name)

    // No need for resp from the cleanup DELETE
    _, err := client.Delete(path)

@@ -9,11 +9,9 @@ import (
type Version struct {
    Version     string `json:"version"`
    Description string `json:"description,omitempty"`
    Number      uint   `json:"number,omitempty"`
}

type stepCreateVersion struct {
    number uint // number of the version, if needed in cleanup
}

func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {

@@ -52,9 +50,6 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionHalt
    }

    // Save the number for cleanup
    s.number = version.Number

    state.Put("version", version)

    return multistep.ActionContinue

@@ -63,15 +58,8 @@ func (s *stepCreateVersion) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
    client := state.Get("client").(*VagrantCloudClient)
    ui := state.Get("ui").(packer.Ui)
    config := state.Get("config").(Config)
    box := state.Get("box").(*Box)

    // If we didn't save the version number, it likely doesn't exist or
    // already existed
    if s.number == 0 {
        ui.Message("Version was not created or previously existed, not deleting")
        return
    }
    version := state.Get("version").(*Version)

    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)

@@ -82,10 +70,10 @@ func (s *stepCreateVersion) Cleanup(state multistep.StateBag) {
        return
    }

    path := fmt.Sprintf("box/%s/version/%v", box.Tag, s.number)
    path := fmt.Sprintf("box/%s/version/%v", box.Tag, version.Version)

    ui.Say("Cleaning up version")
    ui.Message(fmt.Sprintf("Deleting version: %s", config.Version))
    ui.Message(fmt.Sprintf("Deleting version: %s", version.Version))

    // No need for resp from the cleanup DELETE
    _, err := client.Delete(path)

@@ -22,7 +22,7 @@ func (s *stepPrepareUpload) Run(state multistep.StateBag) multistep.StepAction {
    provider := state.Get("provider").(*Provider)
    artifactFilePath := state.Get("artifactFilePath").(string)

    path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Number, provider.Name)
    path := fmt.Sprintf("box/%s/version/%v/provider/%s/upload", box.Tag, version.Version, provider.Name)
    upload := &Upload{}

    ui.Say(fmt.Sprintf("Preparing upload of box: %s", artifactFilePath))

@@ -24,7 +24,7 @@ func (s *stepReleaseVersion) Run(state multistep.StateBag) multistep.StepAction
        return multistep.ActionContinue
    }

    path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Number)
    path := fmt.Sprintf("box/%s/version/%v/release", box.Tag, version.Version)

    resp, err := client.Put(path)

@@ -19,7 +19,7 @@ func (s *stepVerifyUpload) Run(state multistep.StateBag) multistep.StepAction {
    upload := state.Get("upload").(*Upload)
    provider := state.Get("provider").(*Provider)

    path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Number, provider.Name)
    path := fmt.Sprintf("box/%s/version/%v/provider/%s", box.Tag, version.Version, provider.Name)

    providerCheck := &Provider{}

@@ -113,14 +113,8 @@ func (p *PostProcessor) PostProcessProvider(name string, provider Provider, ui p
    // Write our Vagrantfile
    var customVagrantfile string
    if config.VagrantfileTemplate != "" {
        vagrantfilePath, err := config.tpl.Process(config.VagrantfileTemplate, nil)
        if err != nil {
            return nil, false, err
        }

        ui.Message(fmt.Sprintf(
            "Using custom Vagrantfile: %s", vagrantfilePath))
        customBytes, err := ioutil.ReadFile(vagrantfilePath)
        ui.Message(fmt.Sprintf("Using custom Vagrantfile: %s", config.VagrantfileTemplate))
        customBytes, err := ioutil.ReadFile(config.VagrantfileTemplate)
        if err != nil {
            return nil, false, err
        }

@@ -200,11 +194,29 @@ func (p *PostProcessor) configureSingle(config *Config, raws ...interface{}) err
    // Accumulate any errors
    errs := common.CheckUnusedConfig(md)

    templates := map[string]*string{
        "vagrantfile_template": &config.VagrantfileTemplate,
    }

    for key, ptr := range templates {
        *ptr, err = config.tpl.Process(*ptr, nil)
        if err != nil {
            errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing %s: %s", key, err))
        }
    }

    validates := map[string]*string{
        "output":               &config.OutputPath,
        "vagrantfile_template": &config.VagrantfileTemplate,
    }

    if config.VagrantfileTemplate != "" {
        _, err := os.Stat(config.VagrantfileTemplate)
        if err != nil {
            errs = packer.MultiErrorAppend(errs, fmt.Errorf("vagrantfile_template '%s' does not exist", config.VagrantfileTemplate))
        }
    }

    for n, ptr := range validates {
        if err := config.tpl.Validate(*ptr); err != nil {
            errs = packer.MultiErrorAppend(

@@ -4,6 +4,8 @@ import (
    "bytes"
    "compress/flate"
    "github.com/mitchellh/packer/packer"
    "io/ioutil"
    "os"
    "strings"
    "testing"
)

@@ -82,16 +84,22 @@ func TestPostProcessorPrepare_outputPath(t *testing.T) {
func TestPostProcessorPrepare_subConfigs(t *testing.T) {
    var p PostProcessor

    f, err := ioutil.TempFile("", "packer")
    if err != nil {
        t.Fatalf("err: %s", err)
    }
    defer os.Remove(f.Name())

    // Default
    c := testConfig()
    c["compression_level"] = 42
    c["vagrantfile_template"] = "foo"
    c["vagrantfile_template"] = f.Name()
    c["override"] = map[string]interface{}{
        "aws": map[string]interface{}{
            "compression_level": 7,
        },
    }
    err := p.Configure(c)
    err = p.Configure(c)
    if err != nil {
        t.Fatalf("err: %s", err)
    }

@@ -100,7 +108,7 @@ func TestPostProcessorPrepare_subConfigs(t *testing.T) {
        t.Fatalf("bad: %#v", p.configs[""].CompressionLevel)
    }

    if p.configs[""].VagrantfileTemplate != "foo" {
    if p.configs[""].VagrantfileTemplate != f.Name() {
        t.Fatalf("bad: %#v", p.configs[""].VagrantfileTemplate)
    }

@@ -108,11 +116,30 @@ func TestPostProcessorPrepare_subConfigs(t *testing.T) {
        t.Fatalf("bad: %#v", p.configs["aws"].CompressionLevel)
    }

    if p.configs["aws"].VagrantfileTemplate != "foo" {
    if p.configs["aws"].VagrantfileTemplate != f.Name() {
        t.Fatalf("bad: %#v", p.configs["aws"].VagrantfileTemplate)
    }
}

func TestPostProcessorPrepare_vagrantfileTemplateExists(t *testing.T) {
    var p PostProcessor

    f, err := ioutil.TempFile("", "packer")
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    c := testConfig()
    c["vagrantfile_template"] = f.Name()

    os.Remove(f.Name())

    err = p.Configure(c)
    if err == nil {
        t.Fatal("expected an error since vagrantfile_template does not exist")
    }
}

func TestPostProcessorPostProcess_badId(t *testing.T) {
    artifact := &packer.MockArtifact{
        BuilderIdValue: "invalid.packer",

@@ -124,6 +151,41 @@ func TestPostProcessorPostProcess_badId(t *testing.T) {
    }
}

func TestPostProcessorPostProcess_vagrantfileUserVariable(t *testing.T) {
    var p PostProcessor

    f, err := ioutil.TempFile("", "packer")
    if err != nil {
        t.Fatalf("err: %s", err)
    }
    defer os.Remove(f.Name())

    c := map[string]interface{}{
        "packer_user_variables": map[string]string{
            "foo": f.Name(),
        },

        "vagrantfile_template": "{{user `foo`}}",
    }
    err = p.Configure(c)
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    a := &packer.MockArtifact{
        BuilderIdValue: "packer.parallels",
    }
    a2, _, err := p.PostProcess(testUi(), a)
    if a2 != nil {
        for _, fn := range a2.Files() {
            defer os.Remove(fn)
        }
    }
    if err != nil {
        t.Fatalf("err: %s", err)
    }
}

func TestProviderForName(t *testing.T) {
    if v, ok := providerForName("virtualbox").(*VBoxProvider); !ok {
        t.Fatalf("bad: %#v", v)

@@ -22,6 +22,7 @@ type Config struct {
    common.PackerConfig `mapstructure:",squash"`

    ChefEnvironment string `mapstructure:"chef_environment"`
    SslVerifyMode   string `mapstructure:"ssl_verify_mode"`
    ConfigTemplate  string `mapstructure:"config_template"`
    ExecuteCommand  string `mapstructure:"execute_command"`
    InstallCommand  string `mapstructure:"install_command"`

@@ -50,6 +51,7 @@ type ConfigTemplate struct {
    ValidationKeyPath    string
    ValidationClientName string
    ChefEnvironment      string
    SslVerifyMode        string
}

type ExecuteTemplate struct {

@@ -79,6 +81,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {

    templates := map[string]*string{
        "chef_environment": &p.config.ChefEnvironment,
        "ssl_verify_mode":  &p.config.SslVerifyMode,
        "config_template":  &p.config.ConfigTemplate,
        "node_name":        &p.config.NodeName,
        "staging_dir":      &p.config.StagingDir,

@@ -214,7 +217,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    }

    configPath, err := p.createConfig(
        ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment)
        ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment, p.config.SslVerifyMode)
    if err != nil {
        return fmt.Errorf("Error creating Chef config file: %s", err)
    }

@@ -268,7 +271,7 @@ func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, ds
    return comm.UploadDir(dst, src, nil)
}

func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string) (string, error) {
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string, sslVerifyMode string) (string, error) {
    ui.Message("Creating configuration file 'client.rb'")

    // Read the template

@@ -294,6 +297,7 @@ func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeN
        ValidationKeyPath:    remoteKeyPath,
        ValidationClientName: validationClientName,
        ChefEnvironment:      chefEnvironment,
        SslVerifyMode:        sslVerifyMode,
    })
    if err != nil {
        return "", err

@@ -574,4 +578,7 @@ node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
`

@@ -176,13 +176,10 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
        errs = packer.MultiErrorAppend(errs,
            fmt.Errorf("A manifest_file must be specified."))
    } else {
        info, err := os.Stat(p.config.ManifestFile)
        _, err := os.Stat(p.config.ManifestFile)
        if err != nil {
            errs = packer.MultiErrorAppend(errs,
                fmt.Errorf("manifest_file is invalid: %s", err))
        } else if info.IsDir() {
            errs = packer.MultiErrorAppend(errs,
                fmt.Errorf("manifest_file must point to a file"))
        }
    }

@@ -114,7 +114,14 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    ui.Say("Provisioning with Salt...")
    if !p.config.SkipBootstrap {
        cmd := &packer.RemoteCmd{
            Command: fmt.Sprintf("wget -O - https://bootstrap.saltstack.com | sudo sh -s %s", p.config.BootstrapArgs),
            Command: "curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh",
        }
        ui.Message("Downloading saltstack bootstrap to /tmp/install_salt.sh")
        if err = cmd.StartWithUi(comm, ui); err != nil {
            return fmt.Errorf("Unable to download Salt: %s", err)
        }
        cmd = &packer.RemoteCmd{
            Command: fmt.Sprintf("sudo sh /tmp/install_salt.sh %s", p.config.BootstrapArgs),
        }
        ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd.Command))
        if err = cmd.StartWithUi(comm, ui); err != nil {

@@ -174,7 +181,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    }

    ui.Message("Running highstate")
    cmd := &packer.RemoteCmd{Command: "sudo salt-call --local state.highstate -l info"}
    cmd := &packer.RemoteCmd{Command: "sudo salt-call --local state.highstate -l info --retcode-passthrough"}
    if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
        if err == nil {
            err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)

@@ -17,20 +17,20 @@ load test_helper
  [[ "$output" == *"Packer v"* ]]

  run packer -v
  [ "$status" -eq 0 ]
  [[ "$output" == *"Packer v"* ]]
  [ "$status" -eq 1 ]
  [[ "$output" =~ ([0-9]+\.[0-9]+) ]]

  run packer --version
  [ "$status" -eq 0 ]
  [[ "$output" == *"Packer v"* ]]
  [ "$status" -eq 1 ]
  [[ "$output" =~ ([0-9]+\.[0-9]+) ]]
}

@test "cli: packer version show help" {
  run packer version -h
  [ "$status" -eq 0 ]
  [[ "$output" == *"usage: packer version"* ]]
  [[ "$output" == *"Packer v"* ]]

  run packer version --help
  [ "$status" -eq 0 ]
  [[ "$output" == *"usage: packer version"* ]]
  [[ "$output" == *"Packer v"* ]]
}

@@ -4,9 +4,9 @@ package main
var GitCommit string

// The main version number that is being run at the moment.
const Version = "0.8.0"
const Version = "0.7.5"

// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
const VersionPrerelease = "dev"
const VersionPrerelease = ""

@@ -30,7 +30,7 @@ in your account, it is up to you to use, delete, etc. the AMI.
## How Does it Work?

This builder works by creating a new EBS volume from an existing source AMI
and attaching it into an already-running EC2 instance. One attached, a
and attaching it into an already-running EC2 instance. Once attached, a
[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the
system within that volume. After provisioning, the volume is detached,
snapshotted, and an AMI is made.
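
For illustration only, a minimal `amazon-chroot` builder section might look
like the following; the keys and source AMI are placeholder values, not a
definitive configuration:

```javascript
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "source_ami": "ami-e81d5881",
  "ami_name": "packer-amazon-chroot {{timestamp}}"
}
```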

@@ -54,8 +54,8 @@ each category, the available configuration keys are alphabetized.
### Required:

* `access_key` (string) - The access key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
  If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
  If the environment variables aren't set and Packer is running on
  an EC2 instance, Packer will check the instance metadata for IAM role
  keys.

@@ -66,8 +66,8 @@ each category, the available configuration keys are alphabetized.
  [configuration templates](/docs/templates/configuration-templates.html) for more info)

* `secret_key` (string) - The secret key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
  If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
  If the environment variables aren't set and Packer is running on
  an EC2 instance, Packer will check the instance metadata for IAM role
  keys.

@@ -34,8 +34,8 @@ each category, the available configuration keys are alphabetized.
### Required:

* `access_key` (string) - The access key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
  If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.

* `ami_name` (string) - The name of the resulting AMI that will appear
  when managing AMIs in the AWS console or via APIs. This must be unique.

@@ -49,8 +49,8 @@ each category, the available configuration keys are alphabetized.
  to launch the EC2 instance to create the AMI.

* `secret_key` (string) - The secret key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
  If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.

* `source_ami` (string) - The initial AMI used as a base for the newly
  created machine.

@@ -118,11 +118,11 @@ each category, the available configuration keys are alphabetized.
  described above. Note that if this is specified, you must omit the
  `security_group_id`.

* `spot_price` (string) - The maximum hourly price to launch a spot instance
  to create the AMI. It is a type of instances that EC2 starts when the maximum
  price that you specify exceeds the current spot price. Spot price will be
  updated based on available spot instance capacity and current spot Instance
  requests. It may save you some costs. You can set this to "auto" for
* `spot_price` (string) - The maximum hourly price to pay for a spot instance
  to create the AMI. Spot instances are a type of instance that EC2 starts when
  the current spot price is less than the maximum price you specify. Spot price
  will be updated based on available spot instance capacity and current spot
  instance requests. It may save you some costs. You can set this to "auto" for
  Packer to automatically discover the best spot price.

* `spot_price_auto_product` (string) - Required if `spot_price` is set to

@@ -39,8 +39,8 @@ each category, the available configuration keys are alphabetized.
### Required:

* `access_key` (string) - The access key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.
  If not specified, Packer will use the key from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` (in that order), if set.

* `account_id` (string) - Your AWS account ID. This is required for bundling
  the AMI. This is _not the same_ as the access key. You can find your

@@ -61,8 +61,8 @@ each category, the available configuration keys are alphabetized.
  This bucket will be created if it doesn't exist.

* `secret_key` (string) - The secret key used to communicate with AWS.
  If not specified, Packer will use the environment variables
  `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.
  If not specified, Packer will use the secret from any [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file
  or fall back to environment variables `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY` (in that order), if set.

* `source_ami` (string) - The initial AMI used as a base for the newly
  created machine.

@@ -33,7 +33,7 @@ much easier to use and Amazon generally recommends EBS-backed images nowadays.

## Using an IAM Instance Profile

If AWS keys are not specified in the template or through environment variables
If AWS keys are not specified in the template, a [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, or through environment variables,
Packer will use credentials provided by the instance's IAM profile, if it has one.

The following policy document provides the minimal set of permissions necessary for Packer to work:

@@ -66,8 +66,8 @@ each category, the available configuration keys are alphabetized.

* `region` (string) - The name (or slug) of the region to launch the droplet in.
  Consequently, this is the region where the snapshot will be available.
  This defaults to "nyc1", which is the slug for "New York 1".
  See https://developers.digitalocean.com/regions/ for the accepted region names/slugs.
  This defaults to "nyc3", which is the slug for "New York 3".
  See https://developers.digitalocean.com/v2/#regions for the accepted region names/slugs.

* `region_id` (integer) - The ID of the region to launch the droplet in. Consequently,
  this is the region where the snapshot will be available.

@@ -75,7 +75,7 @@ each category, the available configuration keys are alphabetized.

* `size` (string) - The name (or slug) of the droplet size to use.
  This defaults to "512mb", which is the slug for "512MB".
  See https://developers.digitalocean.com/sizes/ for the accepted size names/slugs.
  See https://developers.digitalocean.com/#sizes for the accepted size names/slugs.

* `size_id` (integer) - The ID of the droplet size to use.
  This setting is deprecated. Use `size` instead.
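
To make the slugs concrete, a `digitalocean` builder section using these keys
might look like the following; the values are illustrative and API credentials
are omitted:

```javascript
{
  "type": "digitalocean",
  "region": "nyc3",
  "size": "512mb"
}
```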

@@ -73,7 +73,6 @@ existing GCE image. The account file is obtained in the previous section.
```javascript
{
  "type": "googlecompute",
  "bucket_name": "my-project-packer-images",
  "account_file": "account.json",
  "project_id": "my-project",
  "source_image": "debian-7-wheezy-v20140718",

@@ -88,9 +87,6 @@ each category, the available options are alphabetized and described.

### Required:

* `bucket_name` (string) - The Google Cloud Storage bucket to store the
  images that are created. The bucket must already exist in your project

* `project_id` (string) - The project ID that will be used to launch instances
  and store images.

@@ -38,11 +38,6 @@ each category, the available configuration keys are alphabetized.
  If not specified, Packer will use the environment variables
  `SDK_PASSWORD` or `OS_PASSWORD` (in that order), if set.

* `provider` (string) - The provider used to connect to the OpenStack service.
  If not specified, Packer will use the environment variable
  `SDK_PROVIDER`, if set.
  For Rackspace this should be `rackspace-us` or `rackspace-uk`.

* `source_image` (string) - The ID or full URL to the base image to use.
  This is the image that will be used to launch a new server and provision it.

@@ -70,18 +65,19 @@ each category, the available configuration keys are alphabetized.
* `networks` (array of strings) - A list of networks by UUID to attach
  to this instance.

* `openstack_provider` (string)
* `openstack_provider` (string) - A name of a provider that has a slightly
  different API model. Currently supported values are "openstack" (default),
  and "rackspace".

* `project` (string) - The project name to boot the instance into. Some
  OpenStack installations require this.
  If not specified, Packer will use the environment variables
  `SDK_PROJECT` or `OS_TENANT_NAME` (in that order), if set.

* `provider` (string) - A name of a provider that has a slightly
  different API model. Currently supported values are "openstack" (default),
  and "rackspace".
  If not specified, Packer will use the environment variables
  `SDK_PROVIDER` or `OS_AUTH_URL` (in that order), if set.
* `provider` (string) - The provider used to connect to the OpenStack service.
  If not specified, Packer will use the environment variables `SDK_PROVIDER`
  or `OS_AUTH_URL` (in that order), if set.
  For Rackspace this should be `rackspace-us` or `rackspace-uk`.

* `proxy_url` (string)

@@ -124,7 +120,6 @@ Ubuntu 12.04 LTS (Precise Pangolin) on Rackspace OpenStack cloud offering.
  "api_key": "",
  "openstack_provider": "rackspace",
  "provider": "rackspace-us",
  "openstack_provider": "rackspace",
  "region": "DFW",
  "ssh_username": "root",
  "image_name": "Test image",

@@ -111,17 +111,22 @@ each category, the available options are alphabetized and described.
  five seconds and one minute 30 seconds, respectively. If this isn't specified,
  the default is 10 seconds.

* `disk_size` (integer) - The size, in megabytes, of the hard disk to create
  for the VM. By default, this is 40000 (about 40 GB).
* `disk_cache` (string) - The cache mode to use for disk. Allowed values
  include any of "writethrough", "writeback", "none", "unsafe" or
  "directsync".

* `disk_image` (boolean) - Packer defaults to building from an ISO file,
  this parameter controls whether the ISO URL supplied is actually a bootable
  QEMU image. When this value is set to true, the machine will clone the
  source, resize it according to `disk_size` and boot the image.

* `disk_interface` (string) - The interface to use for the disk. Allowed
  values include any of "ide," "scsi" or "virtio." Note also that any boot
  commands or kickstart type scripts must have proper adjustments for
  resulting device names. The Qemu builder uses "virtio" by default.

* `disk_cache` (string) - The cache mode to use for disk. Allowed values
  include any of "writethrough", "writeback", "none", "unsafe" or
  "directsync".
* `disk_size` (integer) - The size, in megabytes, of the hard disk to create
  for the VM. By default, this is 40000 (about 40 GB).

* `floppy_files` (array of strings) - A list of files to place onto a floppy
  disk that is attached when the VM is booted. This is most useful

@@ -260,11 +265,6 @@ qemu-system-x86 command. The arguments are all printed for review.
  Packer will choose a randomly available port in this range to use as the
  host port.

* `disk_image` (boolean) - Packer defaults to building from an ISO file,
  this parameter controls whether the ISO URL supplied is actually a bootable
  QEMU image. When this value is set to true, the machine will clone the
  source, resize it according to `disk_size` and boot the image.

## Boot Command

The `boot_command` configuration is very important: it specifies the keys

@@ -136,7 +136,8 @@ each category, the available options are alphabetized and described.

* `hard_drive_interface` (string) - The type of controller that the primary
  hard drive is attached to, defaults to "ide". When set to "sata", the
  drive is attached to an AHCI SATA controller.
  drive is attached to an AHCI SATA controller. When set to "scsi", the drive
  is attached to an LsiLogic SCSI controller.

* `headless` (boolean) - Packer defaults to building VirtualBox
  virtual machines by launching a GUI that shows the console of the

@@ -313,7 +313,7 @@ an Ubuntu 12.04 installer:

```javascript
[
  "<esc><esc><enter><wait>",
  "<esc><esc><enter><wait>",
  "/install/vmlinuz noapic ",
  "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
  "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",

@@ -321,7 +321,7 @@ an Ubuntu 12.04 installer:
  "fb=false debconf/frontend=noninteractive ",
  "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
  "keyboard-configuration/variant=USA console-setup/ask_detect=false ",
  "initrd=/install/initrd.gz -- <enter>"
  "initrd=/install/initrd.gz -- <enter>"
]
```

@@ -0,0 +1,33 @@
---
layout: "docs"
page_title: "Push - Command-Line"
description: |-
  The `packer push` Packer command takes a template and pushes it to a build service that will automatically build this Packer template.
---

# Command-Line: Push

The `packer push` Packer command takes a template and pushes it to a build
service. The build service will automatically build your Packer template and
expose the artifacts.

This command currently only sends templates to
[Atlas](https://atlas.hashicorp.com) by HashiCorp, but the command will
be pluggable in the future with alternate implementations.

External build services such as Atlas make it easy to iterate on Packer
templates, especially when the builder you're running may not be easily
accessible (such as developing `qemu` builders on Mac or Windows).

For the `push` command to work, the
[push configuration](/docs/templates/push.html)
must be completed within the template.

## Options

* `-create=true` - If the build configuration matching the name of the push
  doesn't exist, it will be created if this is true. This defaults to true.

* `-token=FOO` - An access token for authenticating the push. This can also
  be specified within the push configuration in the template. By setting this
  in the template, you can take advantage of user variables.
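
As an illustrative example (the template filename and token value here are
hypothetical), a push with an explicit token might look like:

```text
$ packer push -token=ATLAS_TOKEN template.json
```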

@@ -50,7 +50,7 @@ found later, it will take precedence over one found earlier.

1. The directory where `packer` is, or the executable directory.

2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d` on
2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on
   Windows.

3. The current working directory.

@@ -73,3 +73,13 @@ it may not be the latest available version.
$ brew tap homebrew/binary
$ brew install packer
```

### Chocolatey

If you're using Windows and [Chocolatey](http://chocolatey.org), you can install Packer from
the Windows command line (cmd). Remember that this is updated by a 3rd party, so
it may not be the latest available version.

```text
$ choco install packer
```

@@ -31,7 +31,8 @@ Required:
* `password` (string) - Password to use to authenticate to the vSphere
  endpoint.

* `resource_pool` (string) - The resource pool to upload the VM to.
* `resource_pool` (string) - The resource pool to upload the VM to. This can be
  " " if you do not have resource pools configured.

* `username` (string) - The username to use to authenticate to the vSphere
  endpoint.

@@ -54,6 +54,10 @@ configuration is actually required, but at least `run_list` is recommended.
  the secret for encrypted data bags. By default, this is empty, so no
  secret will be available.

* `environments_path` (string) - The path to the "environments" directory on your local filesystem.
  These will be uploaded to the remote machine in the directory specified by the
  `staging_directory`. By default, this is empty.

* `execute_command` (string) - The command used to execute Chef. This has
  various [configuration template variables](/docs/templates/configuration-templates.html)
  available. See below for more information.

@@ -19,7 +19,7 @@ master.
-> **Note:** Puppet will _not_ be installed automatically
by this provisioner. This provisioner expects that Puppet is already
installed on the machine. It is common practice to use the
[shell provisioner[(/docs/provisioners/shell.html) before the
[shell provisioner](/docs/provisioners/shell.html) before the
Puppet provisioner to do this.

## Basic Example

@@ -148,20 +148,20 @@ on reboot or in your shell script. For example, on Gentoo:

*My shell script doesn't work correctly on Ubuntu*

* On Ubuntu the /bin/sh shell is
* On Ubuntu, the `/bin/sh` shell is
  [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script has
  [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell\)) specific commands in it
  [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands in it,
  then put `#!/bin/bash` at the top of your script. Differences
  between dash and bash can be found on the [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page.

*My shell works when I login but fails with the shell provisioner*

* See the above tip. More than likely your login shell is using /bin/bash
  while the provisioner is using /bin/sh.
* See the above tip. More than likely, your login shell is using `/bin/bash`
  while the provisioner is using `/bin/sh`.

*My installs hang when using `apt-get` or `yum`*

* Make sure you add a "-y" to the command to prevent it from requiring
* Make sure you add a `-y` to the command to prevent it from requiring
  user input before proceeding.

*How do I tell what my shell script is doing?*

@@ -0,0 +1,65 @@
---
layout: "docs"
page_title: "Templates: Push"
description: |-
  Within the template, the push section configures how a template can be
  pushed to a remote build service.
---

# Templates: Push

Within the template, the push section configures how a template can be
[pushed](/docs/command-line/push.html) to a remote build service.

Push configuration is responsible for defining what files are required
to build this template, what the name of the build configuration is in the
build service, etc.

The only build service that Packer can currently push to is
[Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build
services will come in the form of plugins in the future.

Within a template, a push configuration section looks like this:

```javascript
{
  "push": {
    // ... push configuration here
  }
}
```

## Configuration Reference

There are many configuration options available for the push section. They are
segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.

### Required

* `name` (string) - Name of the build configuration in the build service.
  If this doesn't exist, it will be created (by default).

### Optional

* `address` (string) - The address of the build service to use. By default
  this is `https://atlas.hashicorp.com`.

* `base_dir` (string) - The base directory of the files to upload. This
  will be the CWD when the build service executes your template. This
  path is relative to the template.

* `include` (array of strings) - Glob patterns to include relative to
  the `base_dir`. If this is specified, only files that match the include
  pattern are included.

* `exclude` (array of strings) - Glob patterns to exclude relative to
  the `base_dir`.

* `token` (string) - An access token to use to authenticate to the build
  service. For Atlas, you can retrieve this access token in your account
  section by clicking your account name in the upper right corner.

* `vcs` (bool) - If true, Packer will detect your VCS (if there is one)
  and only upload the files that are tracked by the VCS. This is useful
  for automatically excluding ignored files. This defaults to true.
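
Putting these keys together, an illustrative push section might look like the
following; the name and glob patterns are examples only, not required values:

```javascript
{
  "push": {
    "name": "mitchellh/example",
    "base_dir": ".",
    "include": ["*.json", "scripts/*"],
    "exclude": [".git/*"]
  }
}
```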

@@ -16,7 +16,7 @@ with Redis pre-installed. This is just an example. Packer can create images
for [many platforms](/intro/platforms.html) with anything pre-installed.

If you don't have an AWS account, [create one now](http://aws.amazon.com/free/).
For the example, we'll use a "t1.micro" instance to build our image, which
For the example, we'll use a "t2.micro" instance to build our image, which
qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning
it will be free. If you already have an AWS account, you may be charged some
amount of money, but it shouldn't be more than a few cents.

@@ -54,8 +54,8 @@ briefly. Create a file `example.json` and fill it with the following contents:
    "access_key": "{{user `aws_access_key`}}",
    "secret_key": "{{user `aws_secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-de0d9eb7",
    "instance_type": "t1.micro",
    "source_ami": "ami-9eaa1cf6",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-example {{timestamp}}"
  }]

@@ -83,7 +83,7 @@ The entire template should now [look like this](https://gist.github.com/pearkes/
Additional builders are simply added to the `builders` array in the template.
This tells Packer to build multiple images. The builder `type` values don't
even need to be different! In fact, if you wanted to build multiple AMIs,
you can do that as well.
you can do that as long as you specify a unique `name` for each build.
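
For instance, two same-type builders can coexist if each carries a unique
`name`; the names and regions below are illustrative, and the other required
`amazon-ebs` keys are omitted:

```javascript
"builders": [
  {
    "name": "example-east",
    "type": "amazon-ebs",
    "region": "us-east-1"
  },
  {
    "name": "example-west",
    "type": "amazon-ebs",
    "region": "us-west-2"
  }
]
```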

Validate the template with `packer validate`. This is always a good practice.

@@ -49,7 +49,9 @@ Available commands are:
    build       build image(s) from template
    fix         fixes templates from old versions of packer
    inspect     see components of a template
    push        push template files to a Packer build service
    validate    check that a template is valid
    version     Prints the Packer version
```

If you get an error that `packer` could not be found, then your PATH

@@ -13,6 +13,7 @@
      <li><a href="/docs/command-line/build.html">Build</a></li>
      <li><a href="/docs/command-line/fix.html">Fix</a></li>
      <li><a href="/docs/command-line/inspect.html">Inspect</a></li>
      <li><a href="/docs/command-line/push.html">Push</a></li>
      <li><a href="/docs/command-line/validate.html">Validate</a></li>
      <li><a href="/docs/command-line/machine-readable.html">Machine-Readable Output</a></li>
    </ul>

@@ -23,6 +24,7 @@
      <li><a href="/docs/templates/builders.html">Builders</a></li>
      <li><a href="/docs/templates/provisioners.html">Provisioners</a></li>
      <li><a href="/docs/templates/post-processors.html">Post-Processors</a></li>
      <li><a href="/docs/templates/push.html">Push</a></li>
      <li><a href="/docs/templates/configuration-templates.html">Configuration Templates</a></li>
      <li><a href="/docs/templates/user-variables.html">User Variables</a></li>
      <li><a href="/docs/templates/veewee-to-packer.html">Veewee-to-Packer</a></li>