Merge pull request #7466 from hashicorp/provisioner_timeout

Provisioner timeout
Commit 212645b91c by Megan Marsh, 2019-04-10 13:21:50 -07:00, committed by GitHub.
110 changed files with 1116 additions and 660 deletions


@ -2,6 +2,7 @@ package chroot
import (
"bytes"
"context"
"fmt"
"io"
"log"
@ -23,7 +24,7 @@ type Communicator struct {
CmdWrapper CommandWrapper
}
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
func (c *Communicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
// need extra escapes for the command since we're wrapping it in quotes
cmd.Command = strconv.Quote(cmd.Command)
command, err := c.CmdWrapper(

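This signature change (Start now takes a context.Context) repeats across every communicator touched by this diff. For orientation, a minimal sketch of what a context-aware implementation can look like; `LocalCommunicator` is a hypothetical type for illustration only and is not part of this PR:

```go
package example

import (
	"context"
	"os/exec"

	"github.com/hashicorp/packer/packer"
)

// LocalCommunicator is a hypothetical communicator used only to illustrate
// the new context-aware Start signature.
type LocalCommunicator struct{}

func (c *LocalCommunicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
	// exec.CommandContext kills the child process when ctx is cancelled,
	// which is how a timeout set higher up the stack reaches the command.
	localCmd := exec.CommandContext(ctx, "/bin/sh", "-c", cmd.Command)
	localCmd.Stdin = cmd.Stdin
	localCmd.Stdout = cmd.Stdout
	localCmd.Stderr = cmd.Stderr
	if err := localCmd.Start(); err != nil {
		return err
	}
	go func() {
		exitStatus := 0
		if err := localCmd.Wait(); err != nil {
			exitStatus = 1
			if exitErr, ok := err.(*exec.ExitError); ok {
				exitStatus = exitErr.ExitCode()
			}
		}
		// SetExited unblocks RemoteCmd.Wait() / ExitStatus() in callers.
		cmd.SetExited(exitStatus)
	}()
	return nil
}
```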

@ -1,6 +1,7 @@
package chroot
import (
"context"
"fmt"
sl "github.com/hashicorp/packer/common/shell-local"
@ -9,6 +10,7 @@ import (
)
func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ictx interpolate.Context, ui packer.Ui) error {
ctx := context.TODO()
for _, rawCmd := range commands {
intCmd, err := interpolate.Render(rawCmd, &ictx)
if err != nil {
@ -25,13 +27,13 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ictx int
ExecuteCommand: []string{"sh", "-c", command},
}
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return fmt.Errorf("Error executing command: %s", err)
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Received non-zero exit code %d from command: %s",
cmd.ExitStatus,
cmd.ExitStatus(),
command)
}
}


@ -3,12 +3,13 @@ package common
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
retry "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
@ -90,17 +91,26 @@ func (s *StepCreateTags) Run(ctx context.Context, state multistep.StateBag) mult
snapshotTags.Report(ui)
// Retry creating tags for about 2.5 minutes
err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
err = retry.Config{
Tries: 11,
ShouldRetry: func(error) bool {
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case "InvalidAMIID.NotFound", "InvalidSnapshot.NotFound":
return true
}
}
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
// Tag images and snapshots
_, err := regionConn.CreateTags(&ec2.CreateTagsInput{
Resources: resourceIds,
Tags: amiTags,
})
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidAMIID.NotFound" ||
awsErr.Code() == "InvalidSnapshot.NotFound" {
return false, nil
}
if err != nil {
return err
}
// Override tags on snapshots
@ -110,15 +120,7 @@ func (s *StepCreateTags) Run(ctx context.Context, state multistep.StateBag) mult
Tags: snapshotTags,
})
}
if err == nil {
return true, nil
}
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidSnapshot.NotFound" {
return false, nil
}
}
return true, err
return err
})
if err != nil {

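The hunk above shows the conversion pattern repeated throughout this PR: the old positional retry.Retry helper, whose callback returned (bool, error), is replaced by the new retry.Config from common/retry (added further down in this diff), with retry/stop decisions moved into ShouldRetry. A condensed, illustrative sketch of the same idea, passing the error straight to ShouldRetry instead of closing over the outer err variable as the PR code does:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer/common/retry"
)

// tagWithRetry condenses the pattern above: the callback only returns an
// error, and ShouldRetry decides whether that error is worth another attempt.
func tagWithRetry(ctx context.Context, conn *ec2.EC2, input *ec2.CreateTagsInput) error {
	return retry.Config{
		Tries: 11,
		ShouldRetry: func(err error) bool {
			if awsErr, ok := err.(awserr.Error); ok {
				switch awsErr.Code() {
				case "InvalidAMIID.NotFound", "InvalidSnapshot.NotFound":
					return true // eventual consistency; retry
				}
			}
			return false
		},
		RetryDelay: (&retry.Backoff{
			InitialBackoff: 200 * time.Millisecond,
			MaxBackoff:     30 * time.Second,
			Multiplier:     2,
		}).Linear,
	}.Run(ctx, func(ctx context.Context) error {
		_, err := conn.CreateTags(input)
		return err
	})
}
```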

@ -4,11 +4,12 @@ import (
"context"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
retry "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -31,21 +32,24 @@ func (s *StepPreValidate) Run(ctx context.Context, state multistep.StateBag) mul
// time to become eventually-consistent
ui.Say("You're using Vault-generated AWS credentials. It may take a " +
"few moments for them to become available on AWS. Waiting...")
err := retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
ec2conn, err := accessconf.NewEC2Connection()
if err != nil {
return true, err
}
_, err = listEC2Regions(ec2conn)
if err != nil {
err := retry.Config{
Tries: 11,
ShouldRetry: func(err error) bool {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "AuthFailure" {
log.Printf("Waiting for Vault-generated AWS credentials" +
" to pass authentication... trying again.")
return false, nil
return true
}
return true, err
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
ec2conn, err := accessconf.NewEC2Connection()
if err != nil {
return err
}
return true, nil
_, err = listEC2Regions(ec2conn)
return err
})
if err != nil {


@ -6,12 +6,13 @@ import (
"fmt"
"io/ioutil"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
retry "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
@ -230,20 +231,24 @@ func (s *StepRunSourceInstance) Run(ctx context.Context, state multistep.StateBa
if s.IsRestricted {
ec2Tags.Report(ui)
// Retry creating tags for about 2.5 minutes
err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
err = retry.Config{
Tries: 11,
ShouldRetry: func(error) bool {
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case "InvalidInstanceID.NotFound":
return true
}
}
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
Tags: ec2Tags,
Resources: []*string{instance.InstanceId},
})
if err == nil {
return true, nil
}
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidInstanceID.NotFound" {
return false, nil
}
}
return true, err
return err
})
if err != nil {


@ -13,7 +13,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
retry "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
@ -241,13 +241,16 @@ func (s *StepRunSpotInstance) Run(ctx context.Context, state multistep.StateBag)
spotTags.Report(ui)
if len(spotTags) > 0 && s.SpotTags.IsSet() {
// Retry creating tags for about 2.5 minutes
err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
err = retry.Config{
Tries: 11,
ShouldRetry: func(error) bool { return false },
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
Tags: spotTags,
Resources: []*string{spotRequestId},
})
return true, err
return err
})
if err != nil {
err := fmt.Errorf("Error tagging spot request: %s", err)
@ -284,20 +287,24 @@ func (s *StepRunSpotInstance) Run(ctx context.Context, state multistep.StateBag)
instance := r.Reservations[0].Instances[0]
// Retry creating tags for about 2.5 minutes
err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
err = retry.Config{
Tries: 11,
ShouldRetry: func(error) bool {
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case "InvalidInstanceID.NotFound":
return true
}
}
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
Tags: ec2Tags,
Resources: []*string{instance.InstanceId},
})
if err == nil {
return true, nil
}
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidInstanceID.NotFound" {
return false, nil
}
}
return true, err
return err
})
if err != nil {


@ -3,10 +3,11 @@ package common
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -40,29 +41,26 @@ func (s *StepStopEBSBackedInstance) Run(ctx context.Context, state multistep.Sta
// does not exist.
// Work around this by retrying a few times, up to about 5 minutes.
err := common.Retry(10, 60, 6, func(i uint) (bool, error) {
ui.Message(fmt.Sprintf("Stopping instance, attempt %d", i+1))
err := retry.Config{
Tries: 6,
ShouldRetry: func(error) bool {
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case "InvalidInstanceID.NotFound":
return true
}
}
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
ui.Message(fmt.Sprintf("Stopping instance"))
_, err = ec2conn.StopInstances(&ec2.StopInstancesInput{
InstanceIds: []*string{instance.InstanceId},
})
if err == nil {
// success
return true, nil
}
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidInstanceID.NotFound" {
ui.Message(fmt.Sprintf(
"Error stopping instance; will retry ..."+
"Error: %s", err))
// retry
return false, nil
}
}
// errored, but not in expected way. Don't want to retry
return true, err
return err
})
if err != nil {


@ -59,13 +59,13 @@ func (s *StepBundleVolume) Run(ctx context.Context, state multistep.StateBag) mu
ui.Say(fmt.Sprintf("Running: %s", config.BundleVolCommand))
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
state.Put("error", fmt.Errorf("Error bundling volume: %s", err))
ui.Error(state.Get("error").(error).Error())
return multistep.ActionHalt
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
state.Put("error", fmt.Errorf(
"Volume bundling failed. Please see the output above for more\n"+
"details on what went wrong.\n\n"+


@ -69,14 +69,14 @@ func (s *StepUploadBundle) Run(ctx context.Context, state multistep.StateBag) mu
ui.Say(fmt.Sprintf("Running: %s", config.BundleUploadCommand))
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
state.Put("error", fmt.Errorf("Error uploading volume: %s", err))
ui.Error(state.Get("error").(error).Error())
return multistep.ActionHalt
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus == 3 {
if cmd.ExitStatus() != 0 {
if cmd.ExitStatus() == 3 {
ui.Error(fmt.Sprintf("Please check that the bucket `%s` "+
"does not exist, or exists and is writable. This error "+
"indicates that the bucket may be owned by somebody else.",


@ -3,9 +3,10 @@ package arm
import (
"context"
"fmt"
"time"
"github.com/hashicorp/packer/builder/azure/common/constants"
retry "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -94,17 +95,18 @@ func (s *StepDeleteResourceGroup) deleteDeploymentResources(ctx context.Context,
resourceType,
resourceName))
err := retry.Retry(10, 600, 10, func(attempt uint) (bool, error) {
err := retry.Config{
Tries: 10,
RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 600 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
err := deleteResource(ctx, s.client,
resourceType,
resourceName,
resourceGroupName)
if err != nil {
s.reportIfError(err, resourceName)
return false, nil
}
return true, nil
return err
})
if err = deploymentOperations.Next(); err != nil {


@ -2,6 +2,7 @@ package docker
import (
"archive/tar"
"context"
"fmt"
"io"
"io/ioutil"
@ -28,7 +29,9 @@ type Communicator struct {
EntryPoint []string
}
func (c *Communicator) Start(remote *packer.RemoteCmd) error {
var _ packer.Communicator = new(Communicator)
func (c *Communicator) Start(ctx context.Context, remote *packer.RemoteCmd) error {
dockerArgs := []string{
"exec",
"-i",


@ -15,10 +15,6 @@ import (
"github.com/hashicorp/packer/template"
)
func TestCommunicator_impl(t *testing.T) {
var _ packer.Communicator = new(Communicator)
}
// TestUploadDownload verifies that basic upload / download functionality works
func TestUploadDownload(t *testing.T) {
ui := packer.TestUi(t)


@ -2,6 +2,7 @@ package docker
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@ -49,15 +50,15 @@ func (c *WindowsContainerCommunicator) Upload(dst string, src io.Reader, fi *os.
Command: fmt.Sprintf("Copy-Item -Path %s/%s -Destination %s", c.ContainerDir,
filepath.Base(tempfile.Name()), dst),
}
if err := c.Start(cmd); err != nil {
ctx := context.TODO()
if err := c.Start(ctx, cmd); err != nil {
return err
}
// Wait for the copy to complete
cmd.Wait()
if cmd.ExitStatus != 0 {
return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus())
}
return nil
@ -135,14 +136,15 @@ func (c *WindowsContainerCommunicator) UploadDir(dst string, src string, exclude
Command: fmt.Sprintf("Copy-Item %s -Destination %s -Recurse",
containerSrc, containerDst),
}
if err := c.Start(cmd); err != nil {
ctx := context.TODO()
if err := c.Start(ctx, cmd); err != nil {
return err
}
// Wait for the copy to complete
cmd.Wait()
if cmd.ExitStatus != 0 {
return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Upload failed with non-zero exit status: %d", cmd.ExitStatus())
}
return nil
@ -160,15 +162,16 @@ func (c *WindowsContainerCommunicator) Download(src string, dst io.Writer) error
Stdout: &stdout,
Stderr: &stderr,
}
if err := c.Start(cmd); err != nil {
ctx := context.TODO()
if err := c.Start(ctx, cmd); err != nil {
return err
}
// Wait for the copy to complete
cmd.Wait()
if cmd.ExitStatus != 0 {
return fmt.Errorf("Failed to copy file to shared drive: %s, %s, %d", stderr.String(), stdout.String(), cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Failed to copy file to shared drive: %s, %s, %d", stderr.String(), stdout.String(), cmd.ExitStatus())
}
// Read that copied file into a new file opened on host machine


@ -68,5 +68,11 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack
artifact.filename = b.config.Target
}
if hook != nil {
if err := hook.Run(ctx, packer.HookProvision, ui, new(packer.MockCommunicator), nil); err != nil {
return nil, err
}
}
return artifact, nil
}


@ -1,6 +1,7 @@
package googlecompute
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
@ -15,7 +16,7 @@ import (
compute "google.golang.org/api/compute/v1"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/useragent"
"github.com/hashicorp/packer/packer"
@ -608,14 +609,18 @@ type stateRefreshFunc func() (string, error)
// waitForState will spin in a loop forever waiting for state to
// reach a certain target.
func waitForState(errCh chan<- error, target string, refresh stateRefreshFunc) error {
err := common.Retry(2, 2, 0, func(_ uint) (bool, error) {
ctx := context.TODO()
err := retry.Config{
RetryDelay: (&retry.Backoff{InitialBackoff: 2 * time.Second, MaxBackoff: 2 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
state, err := refresh()
if err != nil {
return false, err
} else if state == target {
return true, nil
return err
}
return false, nil
if state == target {
return nil
}
return fmt.Errorf("retrying for state %s, got %s", target, state)
})
errCh <- err
return err


@ -4,8 +4,9 @@ import (
"context"
"errors"
"fmt"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -23,26 +24,32 @@ func (s *StepWaitStartupScript) Run(ctx context.Context, state multistep.StateBa
ui.Say("Waiting for any running startup script to finish...")
// Keep checking the serial port output to see if the startup script is done.
err := common.Retry(10, 60, 0, func(_ uint) (bool, error) {
err := retry.Config{
ShouldRetry: func(error) bool {
return true
},
RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
status, err := driver.GetInstanceMetadata(config.Zone,
instanceName, StartupScriptStatusKey)
if err != nil {
err := fmt.Errorf("Error getting startup script status: %s", err)
return false, err
return err
}
if status == StartupScriptStatusError {
err = errors.New("Startup script error.")
return false, err
return err
}
done := status == StartupScriptStatusDone
if !done {
ui.Say("Startup script not finished yet. Waiting...")
return errors.New("Startup script not done.")
}
return done, nil
return nil
})
if err != nil {


@ -1,6 +1,7 @@
package hyperone
import (
"context"
"fmt"
"io"
"os"
@ -20,7 +21,7 @@ type ChrootCommunicator struct {
Wrapped packer.Communicator
}
func (c *ChrootCommunicator) Start(cmd *packer.RemoteCmd) error {
func (c *ChrootCommunicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
command := strconv.Quote(cmd.Command)
chrootCommand, err := c.CmdWrapper(
fmt.Sprintf("sudo chroot %s /bin/sh -c %s", c.Chroot, command))
@ -30,7 +31,7 @@ func (c *ChrootCommunicator) Start(cmd *packer.RemoteCmd) error {
cmd.Command = chrootCommand
return c.Wrapped.Start(cmd)
return c.Wrapped.Start(ctx, cmd)
}
func (c *ChrootCommunicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {


@ -2,6 +2,7 @@ package hyperone
import (
"bytes"
"context"
"fmt"
"log"
"strings"
@ -22,6 +23,7 @@ func formatOpenAPIError(err error) string {
}
func runCommands(commands []string, ictx interpolate.Context, state multistep.StateBag) error {
ctx := context.TODO()
ui := state.Get("ui").(packer.Ui)
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
comm := state.Get("communicator").(packer.Communicator)
@ -43,15 +45,15 @@ func runCommands(commands []string, ictx interpolate.Context, state multistep.St
ui.Say(fmt.Sprintf("Executing command: %s", command))
err = remoteCmd.StartWithUi(comm, ui)
err = remoteCmd.RunWithUi(ctx, comm, ui)
if err != nil {
return fmt.Errorf("error running remote cmd: %s", err)
}
if remoteCmd.ExitStatus != 0 {
if remoteCmd.ExitStatus() != 0 {
return fmt.Errorf(
"received non-zero exit code %d from command: %s",
remoteCmd.ExitStatus,
remoteCmd.ExitStatus(),
command)
}
}
@ -59,6 +61,7 @@ func runCommands(commands []string, ictx interpolate.Context, state multistep.St
}
func captureOutput(command string, state multistep.StateBag) (string, error) {
ctx := context.TODO()
comm := state.Get("communicator").(packer.Communicator)
var stdout bytes.Buffer
@ -69,16 +72,16 @@ func captureOutput(command string, state multistep.StateBag) (string, error) {
log.Println(fmt.Sprintf("Executing command: %s", command))
err := comm.Start(remoteCmd)
err := comm.Start(ctx, remoteCmd)
if err != nil {
return "", fmt.Errorf("error running remote cmd: %s", err)
}
remoteCmd.Wait()
if remoteCmd.ExitStatus != 0 {
if remoteCmd.ExitStatus() != 0 {
return "", fmt.Errorf(
"received non-zero exit code %d from command: %s",
remoteCmd.ExitStatus,
remoteCmd.ExitStatus(),
command)
}


@ -45,7 +45,7 @@ func (s *StepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
Stdout: &stdout,
Stderr: &stderr,
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
err := fmt.Errorf("Failed to send shutdown command: %s", err)
state.Put("error", err)
ui.Error(err.Error())


@ -1,6 +1,7 @@
package lxc
import (
"context"
"fmt"
"io"
"io/ioutil"
@ -22,7 +23,7 @@ type LxcAttachCommunicator struct {
CmdWrapper CommandWrapper
}
func (c *LxcAttachCommunicator) Start(cmd *packer.RemoteCmd) error {
func (c *LxcAttachCommunicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
localCmd, err := c.Execute(cmd.Command)
if err != nil {


@ -1,6 +1,7 @@
package lxd
import (
"context"
"fmt"
"io"
"log"
@ -17,7 +18,7 @@ type Communicator struct {
CmdWrapper CommandWrapper
}
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
func (c *Communicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
localCmd, err := c.Execute(cmd.Command)
if err != nil {
@ -55,11 +56,13 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
}
func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
ctx := context.TODO()
fileDestination := filepath.Join(c.ContainerName, dst)
// find out if the place we are pushing to is a directory
testDirectoryCommand := fmt.Sprintf(`test -d "%s"`, dst)
cmd := &packer.RemoteCmd{Command: testDirectoryCommand}
err := c.Start(cmd)
err := c.Start(ctx, cmd)
if err != nil {
log.Printf("Unable to check whether remote path is a dir: %s", err)
@ -67,7 +70,7 @@ func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
}
cmd.Wait()
if cmd.ExitStatus == 0 {
if cmd.ExitStatus() == 0 {
log.Printf("path is a directory; copying file into directory.")
fileDestination = filepath.Join(c.ContainerName, dst, (*fi).Name())
}


@ -66,15 +66,15 @@ func (s *stepUploadImage) Run(ctx context.Context, state multistep.StateBag) mul
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("sudo /bin/sh %s", dest),
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
err = fmt.Errorf("Problem creating image`: %s", err)
ui.Error(err.Error())
state.Put("error", err)
return multistep.ActionHalt
}
if cmd.ExitStatus != 0 {
err = fmt.Errorf("Create Disk Image command failed with exit code %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
err = fmt.Errorf("Create Disk Image command failed with exit code %d", cmd.ExitStatus())
ui.Error(err.Error())
state.Put("error", err)
return multistep.ActionHalt


@ -45,7 +45,7 @@ func (s *StepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
Stdout: &stdout,
Stderr: &stderr,
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
err := fmt.Errorf("Failed to send shutdown command: %s", err)
state.Put("error", err)
ui.Error(err.Error())


@ -5,8 +5,10 @@ import (
"fmt"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
@ -50,16 +52,18 @@ func (s *stepConvertDisk) Run(ctx context.Context, state multistep.StateBag) mul
ui.Say("Converting hard drive...")
// Retry the conversion a few times in case it takes the qemu process a
// moment to release the lock
err := common.Retry(1, 10, 10, func(_ uint) (bool, error) {
if err := driver.QemuImg(command...); err != nil {
err := retry.Config{
Tries: 10,
ShouldRetry: func(err error) bool {
if strings.Contains(err.Error(), `Failed to get shared "write" lock`) {
ui.Say("Error getting file lock for conversion; retrying...")
return false, nil
return true
}
err = fmt.Errorf("Error converting hard drive: %s", err)
return true, err
}
return true, nil
return false
},
RetryDelay: (&retry.Backoff{InitialBackoff: 1 * time.Second, MaxBackoff: 10 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
return driver.QemuImg(command...)
})
if err != nil {


@ -52,7 +52,7 @@ func (s *stepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
ui.Say("Gracefully halting virtual machine...")
log.Printf("Executing shutdown command: %s", config.ShutdownCommand)
cmd := &packer.RemoteCmd{Command: config.ShutdownCommand}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
err := fmt.Errorf("Failed to send shutdown command: %s", err)
state.Put("error", err)
ui.Error(err.Error())


@ -6,9 +6,9 @@ import (
"io/ioutil"
"os"
"runtime"
"strings"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
@ -102,6 +102,7 @@ func (s *stepConfigKeyPair) Cleanup(state multistep.StateBag) {
if s.Comm.SSHPrivateKeyFile != "" || (s.Comm.SSHKeyPairName == "" && s.keyID == "") {
return
}
ctx := context.TODO()
client := state.Get("cvm_client").(*cvm.Client)
ui := state.Get("ui").(packer.Ui)
@ -109,16 +110,12 @@ func (s *stepConfigKeyPair) Cleanup(state multistep.StateBag) {
ui.Say("Deleting temporary keypair...")
req := cvm.NewDeleteKeyPairsRequest()
req.KeyIds = []*string{&s.keyID}
err := common.Retry(5, 5, 60, func(u uint) (bool, error) {
err := retry.Config{
Tries: 60,
RetryDelay: (&retry.Backoff{InitialBackoff: 5 * time.Second, MaxBackoff: 5 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := client.DeleteKeyPairs(req)
if err == nil {
return true, nil
}
if strings.Index(err.Error(), "NotSupported") != -1 {
return false, nil
} else {
return false, err
}
return err
})
if err != nil {
ui.Error(fmt.Sprintf(


@ -2,11 +2,11 @@ package cvm
import (
"context"
"time"
"fmt"
"strings"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/pkg/errors"
@ -102,22 +102,19 @@ func (s *stepConfigSecurityGroup) Cleanup(state multistep.StateBag) {
if !s.isCreate {
return
}
ctx := context.TODO()
vpcClient := state.Get("vpc_client").(*vpc.Client)
ui := state.Get("ui").(packer.Ui)
MessageClean(state, "VPC")
req := vpc.NewDeleteSecurityGroupRequest()
req.SecurityGroupId = &s.SecurityGroupId
err := common.Retry(5, 5, 60, func(u uint) (bool, error) {
err := retry.Config{
Tries: 60,
RetryDelay: (&retry.Backoff{InitialBackoff: 5 * time.Second, MaxBackoff: 5 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := vpcClient.DeleteSecurityGroup(req)
if err == nil {
return true, nil
}
if strings.Index(err.Error(), "ResourceInUse") != -1 {
return false, nil
} else {
return false, err
}
return err
})
if err != nil {
ui.Error(fmt.Sprintf("delete security group(%s) failed: %s, you need to delete it by hand",


@ -3,9 +3,9 @@ package cvm
import (
"context"
"fmt"
"strings"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/pkg/errors"
@ -77,6 +77,7 @@ func (s *stepConfigSubnet) Cleanup(state multistep.StateBag) {
if !s.isCreate {
return
}
ctx := context.TODO()
vpcClient := state.Get("vpc_client").(*vpc.Client)
ui := state.Get("ui").(packer.Ui)
@ -84,16 +85,12 @@ func (s *stepConfigSubnet) Cleanup(state multistep.StateBag) {
MessageClean(state, "SUBNET")
req := vpc.NewDeleteSubnetRequest()
req.SubnetId = &s.SubnetId
err := common.Retry(5, 5, 60, func(u uint) (bool, error) {
err := retry.Config{
Tries: 60,
RetryDelay: (&retry.Backoff{InitialBackoff: 5 * time.Second, MaxBackoff: 5 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := vpcClient.DeleteSubnet(req)
if err == nil {
return true, nil
}
if strings.Index(err.Error(), "ResourceInUse") != -1 {
return false, nil
} else {
return false, err
}
return err
})
if err != nil {
ui.Error(fmt.Sprintf("delete subnet(%s) failed: %s, you need to delete it by hand",


@ -3,9 +3,9 @@ package cvm
import (
"context"
"fmt"
"strings"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/pkg/errors"
@ -66,6 +66,7 @@ func (s *stepConfigVPC) Cleanup(state multistep.StateBag) {
if !s.isCreate {
return
}
ctx := context.TODO()
vpcClient := state.Get("vpc_client").(*vpc.Client)
ui := state.Get("ui").(packer.Ui)
@ -73,16 +74,12 @@ func (s *stepConfigVPC) Cleanup(state multistep.StateBag) {
MessageClean(state, "VPC")
req := vpc.NewDeleteVpcRequest()
req.VpcId = &s.VpcId
err := common.Retry(5, 5, 60, func(u uint) (bool, error) {
err := retry.Config{
Tries: 60,
RetryDelay: (&retry.Backoff{InitialBackoff: 5 * time.Second, MaxBackoff: 5 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
_, err := vpcClient.DeleteVpc(req)
if err == nil {
return true, nil
}
if strings.Index(err.Error(), "ResourceInUse") != -1 {
return false, nil
} else {
return false, err
}
return err
})
if err != nil {
ui.Error(fmt.Sprintf("delete vpc(%s) failed: %s, you need to delete it by hand",


@ -2,6 +2,7 @@ package common
import (
"bytes"
"context"
"fmt"
"log"
"os/exec"
@ -11,8 +12,7 @@ import (
"time"
versionUtil "github.com/hashicorp/go-version"
packer "github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
)
type VBox42Driver struct {
@ -64,14 +64,13 @@ func (d *VBox42Driver) CreateSCSIController(vmName string, name string) error {
}
func (d *VBox42Driver) Delete(name string) error {
return packer.Retry(1, 1, 5, func(i uint) (bool, error) {
if err := d.VBoxManage("unregistervm", name, "--delete"); err != nil {
if i+1 == 5 {
return false, err
}
return false, nil
}
return true, nil
ctx := context.TODO()
return retry.Config{
Tries: 5,
RetryDelay: (&retry.Backoff{InitialBackoff: 1 * time.Second, MaxBackoff: 1 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
err := d.VBoxManage("unregistervm", name, "--delete")
return err
})
}


@ -4,8 +4,9 @@ import (
"context"
"fmt"
"log"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -46,25 +47,26 @@ func (s *StepRemoveDevices) Run(ctx context.Context, state multistep.StateBag) m
return multistep.ActionHalt
}
var vboxErr error
// Retry for 10 minutes to remove the floppy controller.
log.Printf("Trying for 10 minutes to remove floppy controller.")
err := common.Retry(15, 15, 40, func(_ uint) (bool, error) {
err := retry.Config{
Tries: 40,
RetryDelay: (&retry.Backoff{InitialBackoff: 15 * time.Second, MaxBackoff: 15 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
// Don't forget to remove the floppy controller as well
command = []string{
"storagectl", vmName,
"--name", "Floppy Controller",
"--remove",
}
vboxErr = driver.VBoxManage(command...)
if vboxErr != nil {
err := driver.VBoxManage(command...)
if err != nil {
log.Printf("Error removing floppy controller. Retrying.")
return false, nil
}
return true, nil
return err
})
if err == common.RetryExhaustedError {
err := fmt.Errorf("Error removing floppy controller: %s", vboxErr)
if err != nil {
err := fmt.Errorf("Error removing floppy controller: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt


@ -38,7 +38,7 @@ func (s *StepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
ui.Say("Gracefully halting virtual machine...")
log.Printf("Executing shutdown command: %s", s.Command)
cmd := &packer.RemoteCmd{Command: s.Command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
err := fmt.Errorf("Failed to send shutdown command: %s", err)
state.Put("error", err)
ui.Error(err.Error())


@ -687,6 +687,7 @@ func (d *ESX5Driver) VerifyChecksum(ctype string, hash string, file string) bool
}
func (d *ESX5Driver) ssh(command string, stdin io.Reader) (*bytes.Buffer, error) {
ctx := context.TODO()
var stdout, stderr bytes.Buffer
cmd := &packer.RemoteCmd{
@ -696,14 +697,14 @@ func (d *ESX5Driver) ssh(command string, stdin io.Reader) (*bytes.Buffer, error)
Stdin: stdin,
}
err := d.comm.Start(cmd)
err := d.comm.Start(ctx, cmd)
if err != nil {
return nil, err
}
cmd.Wait()
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
err = fmt.Errorf("'%s'\n\nStdout: %s\n\nStderr: %s",
cmd.Command, stdout.String(), stderr.String())
return nil, err


@ -51,7 +51,7 @@ func (s *StepShutdown) Run(ctx context.Context, state multistep.StateBag) multis
Stdout: &stdout,
Stderr: &stderr,
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
err := fmt.Errorf("Failed to send shutdown command: %s", err)
state.Put("error", err)
ui.Error(err.Error())


@ -205,4 +205,8 @@ func cleanup() {
os.RemoveAll("pear.txt")
os.RemoveAll("tomato.txt")
os.RemoveAll("unnamed.txt")
os.RemoveAll("roses.txt")
os.RemoveAll("fuchsias.txt")
os.RemoveAll("lilas.txt")
os.RemoveAll("campanules.txt")
}


@ -0,0 +1,82 @@
package command
import (
"bytes"
"path/filepath"
"testing"
"github.com/hashicorp/packer/builder/file"
"github.com/hashicorp/packer/packer"
shell_local "github.com/hashicorp/packer/provisioner/shell-local"
"github.com/hashicorp/packer/provisioner/sleep"
)
// testCoreConfigBuilder creates a packer CoreConfig that has a file builder
// available. This allows us to test a builder that writes files to disk.
func testCoreConfigSleepBuilder(t *testing.T) *packer.CoreConfig {
components := packer.ComponentFinder{
Builder: func(n string) (packer.Builder, error) {
switch n {
case "file":
return &file.Builder{}, nil
default:
panic(n)
}
},
Provisioner: func(n string) (packer.Provisioner, error) {
switch n {
case "shell-local":
return &shell_local.Provisioner{}, nil
case "sleep":
return &sleep.Provisioner{}, nil
default:
panic(n)
}
},
}
return &packer.CoreConfig{
Components: components,
}
}
// testMetaFile creates a Meta object that includes a file builder
func testMetaSleepFile(t *testing.T) Meta {
var out, err bytes.Buffer
return Meta{
CoreConfig: testCoreConfigSleepBuilder(t),
Ui: &packer.BasicUi{
Writer: &out,
ErrorWriter: &err,
},
}
}
func TestBuildSleepTimeout(t *testing.T) {
defer cleanup()
c := &BuildCommand{
Meta: testMetaSleepFile(t),
}
args := []string{
filepath.Join(testFixture("timeout"), "template.json"),
}
defer cleanup()
if code := c.Run(args); code == 0 {
fatalCommand(t, c.Meta)
}
for _, f := range []string{"roses.txt", "fuchsias.txt", "lilas.txt"} {
if !fileExists(f) {
t.Errorf("Expected to find %s", f)
}
}
for _, f := range []string{"campanules.txt"} {
if fileExists(f) {
t.Errorf("Expected to not find %s", f)
}
}
}


@ -82,6 +82,7 @@ import (
saltmasterlessprovisioner "github.com/hashicorp/packer/provisioner/salt-masterless"
shellprovisioner "github.com/hashicorp/packer/provisioner/shell"
shelllocalprovisioner "github.com/hashicorp/packer/provisioner/shell-local"
sleepprovisioner "github.com/hashicorp/packer/provisioner/sleep"
windowsrestartprovisioner "github.com/hashicorp/packer/provisioner/windows-restart"
windowsshellprovisioner "github.com/hashicorp/packer/provisioner/windows-shell"
)
@ -145,6 +146,7 @@ var Provisioners = map[string]packer.Provisioner{
"salt-masterless": new(saltmasterlessprovisioner.Provisioner),
"shell": new(shellprovisioner.Provisioner),
"shell-local": new(shelllocalprovisioner.Provisioner),
"sleep": new(sleepprovisioner.Provisioner),
"windows-restart": new(windowsrestartprovisioner.Provisioner),
"windows-shell": new(windowsshellprovisioner.Provisioner),
}

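The "sleep" provisioner registered above is new in this PR but its source file is not shown in this view. As an assumption (not the actual file), a minimal context-aware implementation would look roughly like the sketch below, which is what makes the timeout test fixture that follows meaningful:

```go
package sleep

import (
	"context"
	"time"

	"github.com/hashicorp/packer/packer"
)

// Provisioner blocks for a configured duration, or returns early when the
// build context is cancelled (for example by the new provisioner timeout).
// Field names here are assumptions for illustration, not the real file.
type Provisioner struct {
	Duration time.Duration
}

func (p *Provisioner) Prepare(raws ...interface{}) error {
	// Real config decoding omitted in this sketch.
	return nil
}

func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.Communicator) error {
	ui.Say("Sleeping...")
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(p.Duration):
		return nil
	}
}
```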

@ -0,0 +1,37 @@
{
"builders": [
{
"name": "roses",
"type": "file",
"content": "roses",
"target": "roses.txt"
}
],
"provisioners": [
{
"type": "shell-local",
"inline": [
"touch fuchsias.txt"
],
"timeout": "5s"
},
{
"type": "shell-local",
"inline": [
"touch lilas.txt"
]
},
{
"type": "sleep",
"duration": "2m",
"timeout": "1ms"
},
{
"type": "shell-local",
"inline": [
"touch campanules.txt"
]
}
]
}

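Read together with TestBuildSleepTimeout above: the file builder writes roses.txt, the first two shell-local provisioners create fuchsias.txt and lilas.txt, the sleep provisioner is asked to sleep for 2m under a 1ms timeout and therefore fails the build, and the final shell-local provisioner never runs. That is why the test expects roses.txt, fuchsias.txt, and lilas.txt to exist, campanules.txt to be absent, and the build to exit non-zero.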

@ -2,6 +2,7 @@ package adapter
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
@ -233,15 +234,15 @@ func (c *Adapter) remoteExec(command string, in io.Reader, out io.Writer, err io
Stderr: err,
Command: command,
}
ctx := context.TODO()
if err := c.comm.Start(cmd); err != nil {
if err := c.comm.Start(ctx, cmd); err != nil {
c.ui.Error(err.Error())
return cmd.ExitStatus
}
cmd.Wait()
return cmd.ExitStatus
return cmd.ExitStatus()
}
type envRequest struct {


@ -1,6 +1,7 @@
package adapter
import (
"context"
"errors"
"io"
"log"
@ -95,7 +96,7 @@ func (a addr) String() string {
type communicator struct{}
func (c communicator) Start(*packer.RemoteCmd) error {
func (c communicator) Start(context.Context, *packer.RemoteCmd) error {
return errors.New("communicator not supported")
}

common/retry/retry.go (new file, 81 lines)

@ -0,0 +1,81 @@
package retry
import (
"context"
"fmt"
"log"
"time"
)
// Config represents a retry config
type Config struct {
// The operation will be retried until StartTimeout has elapsed. 0 means
// forever.
StartTimeout time.Duration
// RetryDelay gives the time elapsed after a failure and before we try
// again. Returns 2s by default.
RetryDelay func() time.Duration
// Max number of retries, 0 means infinite
Tries int
// ShouldRetry tells whether error should be retried. Nil defaults to always
// true.
ShouldRetry func(error) bool
}
// Run fn until context is cancelled up until StartTimeout time has passed.
func (cfg Config) Run(ctx context.Context, fn func(context.Context) error) error {
retryDelay := func() time.Duration { return 2 * time.Second }
if cfg.RetryDelay != nil {
retryDelay = cfg.RetryDelay
}
shouldRetry := func(error) bool { return true }
if cfg.ShouldRetry != nil {
shouldRetry = cfg.ShouldRetry
}
var startTimeout <-chan time.Time // nil chans never unlock !
if cfg.StartTimeout != 0 {
startTimeout = time.After(cfg.StartTimeout)
}
for try := 0; ; try++ {
var err error
if cfg.Tries != 0 && try == cfg.Tries {
return err
}
if err = fn(ctx); err == nil {
return nil
}
if !shouldRetry(err) {
return err
}
log.Print(fmt.Errorf("Retryable error: %s", err))
select {
case <-ctx.Done():
return err
case <-startTimeout:
return err
default:
time.Sleep(retryDelay())
}
}
}
type Backoff struct {
InitialBackoff time.Duration
MaxBackoff time.Duration
Multiplier float64
}
func (lb *Backoff) Linear() time.Duration {
wait := lb.InitialBackoff
lb.InitialBackoff = time.Duration(lb.Multiplier * float64(lb.InitialBackoff))
if lb.MaxBackoff != 0 && lb.InitialBackoff > lb.MaxBackoff {
lb.InitialBackoff = lb.MaxBackoff
}
return wait
}

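A self-contained usage sketch of the new package (the failing operation and errBusy are made up for illustration): the callback is retried while ShouldRetry approves the returned error, waiting RetryDelay between attempts, until it succeeds, Tries is exhausted, StartTimeout elapses, or the context is cancelled.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/packer/common/retry"
)

var errBusy = errors.New("resource busy")

func main() {
	ctx := context.Background()

	attempts := 0
	err := retry.Config{
		Tries:        5,
		StartTimeout: time.Minute,
		// Only errBusy is worth retrying; anything else fails immediately.
		ShouldRetry: func(err error) bool { return err == errBusy },
		// Delays of 100ms, 200ms, 400ms, ... capped at 1s between attempts.
		RetryDelay: (&retry.Backoff{
			InitialBackoff: 100 * time.Millisecond,
			MaxBackoff:     time.Second,
			Multiplier:     2,
		}).Linear,
	}.Run(ctx, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errBusy
		}
		return nil
	})

	fmt.Println(attempts, err) // 3 <nil>
}
```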

@ -0,0 +1,79 @@
package retry
import (
"context"
"errors"
"testing"
"time"
)
func success(context.Context) error { return nil }
func wait(ctx context.Context) error {
<-ctx.Done()
return ctx.Err()
}
var failErr = errors.New("woops !")
func fail(context.Context) error { return failErr }
func TestConfig_Run(t *testing.T) {
cancelledCtx, cancel := context.WithCancel(context.Background())
cancel()
type fields struct {
StartTimeout time.Duration
RetryDelay func() time.Duration
}
type args struct {
ctx context.Context
fn func(context.Context) error
}
tests := []struct {
name string
fields fields
args args
wantErr error
}{
{"success",
fields{StartTimeout: time.Second, RetryDelay: nil},
args{context.Background(), success},
nil},
{"context cancelled",
fields{StartTimeout: time.Second, RetryDelay: nil},
args{cancelledCtx, wait},
context.Canceled},
{"timeout",
fields{StartTimeout: 20 * time.Millisecond, RetryDelay: func() time.Duration { return 10 * time.Millisecond }},
args{cancelledCtx, fail},
failErr},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := Config{
StartTimeout: tt.fields.StartTimeout,
RetryDelay: tt.fields.RetryDelay,
}
if err := cfg.Run(tt.args.ctx, tt.args.fn); err != tt.wantErr {
t.Fatalf("Config.Run() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestBackoff_Linear(t *testing.T) {
b := Backoff{
InitialBackoff: 2 * time.Minute,
Multiplier: 2,
}
linear := (&b).Linear
if linear() != 2*time.Minute {
t.Fatal("first backoff should be 2 minutes")
}
if linear() != 4*time.Minute {
t.Fatal("second backoff should be 4 minutes")
}
}


@ -1,6 +1,7 @@
package shell_local
import (
"context"
"fmt"
"io"
"log"
@ -15,14 +16,14 @@ type Communicator struct {
ExecuteCommand []string
}
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
func (c *Communicator) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
if len(c.ExecuteCommand) == 0 {
return fmt.Errorf("Error launching command via shell-local communicator: No ExecuteCommand provided")
}
// Build the local command to execute
log.Printf("[INFO] (shell-local communicator): Executing local shell command %s", c.ExecuteCommand)
localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...)
localCmd := exec.CommandContext(ctx, c.ExecuteCommand[0], c.ExecuteCommand[1:]...)
localCmd.Stdin = cmd.Stdin
localCmd.Stdout = cmd.Stdout
localCmd.Stderr = cmd.Stderr


@ -2,6 +2,7 @@ package shell_local
import (
"bytes"
"context"
"runtime"
"strings"
"testing"
@ -28,14 +29,15 @@ func TestCommunicator(t *testing.T) {
Stdout: &buf,
}
if err := c.Start(cmd); err != nil {
ctx := context.Background()
if err := c.Start(ctx, cmd); err != nil {
t.Fatalf("err: %s", err)
}
cmd.Wait()
if cmd.ExitStatus != 0 {
t.Fatalf("err bad exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
t.Fatalf("err bad exit status: %d", cmd.ExitStatus())
}
if strings.TrimSpace(buf.String()) != "foo" {


@ -2,6 +2,7 @@ package shell_local
import (
"bufio"
"context"
"fmt"
"log"
"os"
@ -27,7 +28,7 @@ type EnvVarsTemplate struct {
WinRMPassword string
}
func Run(ui packer.Ui, config *Config) (bool, error) {
func Run(ctx context.Context, ui packer.Ui, config *Config) (bool, error) {
// Check if shell-local can even execute against this runtime OS
if len(config.OnlyOn) > 0 {
runCommand := false
@ -90,17 +91,17 @@ func Run(ui packer.Ui, config *Config) (bool, error) {
flattenedCmd := strings.Join(interpolatedCmds, " ")
cmd := &packer.RemoteCmd{Command: flattenedCmd}
log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd)
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return false, fmt.Errorf(
"Error executing script: %s\n\n"+
"Please see output above for more information.",
script)
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return false, fmt.Errorf(
"Erroneous exit code %d while executing script: %s\n\n"+
"Please see output above for more information.",
cmd.ExitStatus,
cmd.ExitStatus(),
script)
}
}


@ -56,12 +56,12 @@ func (s *StepCleanupTempKeys) Run(ctx context.Context, state multistep.StateBag)
//
// TODO: Why create a backup file if you are going to remove it?
cmd.Command = fmt.Sprintf("sed -i.bak '/ %s$/d' ~/.ssh/authorized_keys; rm ~/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
log.Printf("Error cleaning up ~/.ssh/authorized_keys; please clean up keys manually: %s", err)
}
cmd = new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("sudo sed -i.bak '/ %s$/d' /root/.ssh/authorized_keys; sudo rm /root/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
log.Printf("Error cleaning up /root/.ssh/authorized_keys; please clean up keys manually: %s", err)
}


@ -1,6 +1,7 @@
package none
import (
"context"
"errors"
"io"
"os"
@ -23,7 +24,7 @@ func New(config string) (result *comm, err error) {
return
}
func (c *comm) Start(cmd *packer.RemoteCmd) (err error) {
func (c *comm) Start(ctx context.Context, cmd *packer.RemoteCmd) (err error) {
cmd.SetExited(0)
return
}


@ -3,6 +3,7 @@ package ssh
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
@ -82,7 +83,7 @@ func New(address string, config *Config) (result *comm, err error) {
return
}
func (c *comm) Start(cmd *packer.RemoteCmd) (err error) {
func (c *comm) Start(ctx context.Context, cmd *packer.RemoteCmd) (err error) {
session, err := c.newSession()
if err != nil {
return


@ -4,6 +4,7 @@ package ssh
import (
"bytes"
"context"
"fmt"
"net"
"testing"
@ -188,7 +189,8 @@ func TestStart(t *testing.T) {
Stdout: new(bytes.Buffer),
}
client.Start(cmd)
ctx := context.Background()
client.Start(ctx, cmd)
}
func TestHandshakeTimeout(t *testing.T) {


@ -1,6 +1,7 @@
package winrm
import (
"context"
"encoding/base64"
"fmt"
"io"
@ -74,7 +75,7 @@ func New(config *Config) (*Communicator, error) {
}
// Start implementation of communicator.Communicator interface
func (c *Communicator) Start(rc *packer.RemoteCmd) error {
func (c *Communicator) Start(ctx context.Context, rc *packer.RemoteCmd) error {
shell, err := c.client.CreateShell()
if err != nil {
return err


@ -2,6 +2,7 @@ package winrm
import (
"bytes"
"context"
"io"
"strings"
"testing"
@ -77,8 +78,8 @@ func TestStart(t *testing.T) {
stdout := new(bytes.Buffer)
cmd.Command = "echo foo"
cmd.Stdout = stdout
err = c.Start(&cmd)
ctx := context.Background()
err = c.Start(ctx, &cmd)
if err != nil {
t.Fatalf("error executing remote command: %s", err)
}

go.mod (2 changed lines)

@ -113,7 +113,7 @@ require (
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7
github.com/mitchellh/go-homedir v1.0.0
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed
github.com/mitchellh/iochan v0.0.0-20150529224432-87b45ffd0e95
github.com/mitchellh/iochan v1.0.1-0.20190408094311-e9f5309a3061
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784

go.sum (2 changed lines)

@ -307,6 +307,8 @@ github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed h1:FI2NIv6fpef6BQ
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
github.com/mitchellh/iochan v0.0.0-20150529224432-87b45ffd0e95 h1:aHWVygBsLb+Kls/35B3tevL1hvDxZ0UklPA0BmhqTEk=
github.com/mitchellh/iochan v0.0.0-20150529224432-87b45ffd0e95/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/iochan v1.0.1-0.20190408094311-e9f5309a3061 h1:BSEloc+wp5WA/fu0jw5HBWOfoKLdvpqi38ZP22eNemg=
github.com/mitchellh/iochan v1.0.1-0.20190408094311-e9f5309a3061/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc h1:5T6hzGUO5OrL6MdYXYoLQtRWJDDgjdlOVBn9mIqGY1g=
github.com/mitchellh/mapstructure v0.0.0-20180111000720-b4575eea38cc/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 h1:w1QuuAA2km2Hax+EPamrq5ZRBeaNv2vsjvgB4an0zoU=


@ -33,7 +33,7 @@ type StepConnectWinRM struct {
WinRMPort func(multistep.StateBag) (int, error)
}
func (s *StepConnectWinRM) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
func (s *StepConnectWinRM) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
var comm packer.Communicator
@ -87,6 +87,7 @@ func (s *StepConnectWinRM) Cleanup(multistep.StateBag) {
}
func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan struct{}) (packer.Communicator, error) {
ctx := context.TODO()
var comm packer.Communicator
for {
select {
@ -164,7 +165,7 @@ func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, cancel <-chan
log.Printf("Checking that WinRM is connected with: '%s'", connectCheckCommand)
ui := state.Get("ui").(packer.Ui)
err := cmd.StartWithUi(comm, ui)
err := cmd.RunWithUi(ctx, comm, ui)
if err != nil {
log.Printf("Communication connection err: %s", err)


@ -1,11 +1,12 @@
package packer
import (
"context"
"io"
"os"
"strings"
"sync"
"unicode"
"golang.org/x/sync/errgroup"
"github.com/mitchellh/iochan"
)
@ -32,19 +33,14 @@ type RemoteCmd struct {
Stdout io.Writer
Stderr io.Writer
// This will be set to true when the remote command has exited. It
// shouldn't be set manually by the user, but there is no harm in
// doing so.
Exited bool
// Once Exited is true, this will contain the exit code of the process.
ExitStatus int
// Internal fields
exitCh chan struct{}
exitStatus int
// This thing is a mutex, lock when making modifications concurrently
sync.Mutex
exitChInit sync.Once
exitCh chan interface{}
}
// A Communicator is the interface used to communicate with the machine
@ -59,7 +55,7 @@ type Communicator interface {
// Start again. The Start method returns immediately once the command
// is started. It does not wait for the command to complete. The
// RemoteCmd.Exited field should be used for this.
Start(*RemoteCmd) error
Start(context.Context, *RemoteCmd) error
// Upload uploads a file to the machine to the given path with the
// contents coming from the given reader. This method will block until
@ -84,10 +80,12 @@ type Communicator interface {
DownloadDir(src string, dst string, exclude []string) error
}
// StartWithUi runs the remote command and streams the output to any
// configured Writers for stdout/stderr, while also writing each line
// as it comes to a Ui.
func (r *RemoteCmd) StartWithUi(c Communicator, ui Ui) error {
// RunWithUi runs the remote command and streams the output to any configured
// Writers for stdout/stderr, while also writing each line as it comes to a Ui.
// RunWithUi will not return until the command finishes or is cancelled.
func (r *RemoteCmd) RunWithUi(ctx context.Context, c Communicator, ui Ui) error {
r.initchan()
stdout_r, stdout_w := io.Pipe()
stderr_r, stderr_w := io.Pipe()
defer stdout_w.Close()
@ -117,94 +115,62 @@ func (r *RemoteCmd) StartWithUi(c Communicator, ui Ui) error {
r.Stderr = io.MultiWriter(r.Stderr, stderr_w)
}
// Start the command
if err := c.Start(r); err != nil {
// Loop and get all our output until done.
printFn := func(in io.Reader, out func(string)) error {
for output := range iochan.LineReader(in) {
if output != "" {
out(output)
}
}
return nil
}
wg, ctx := errgroup.WithContext(ctx)
wg.Go(func() error { return printFn(stdout_r, ui.Message) })
wg.Go(func() error { return printFn(stderr_r, ui.Error) })
if err := c.Start(ctx, r); err != nil {
return err
}
// Create the channels we'll use for data
exitCh := make(chan struct{})
stdoutCh := iochan.DelimReader(stdout_r, '\n')
stderrCh := iochan.DelimReader(stderr_r, '\n')
// Start the goroutine to watch for the exit
go func() {
defer close(exitCh)
defer stdout_w.Close()
defer stderr_w.Close()
r.Wait()
}()
// Loop and get all our output
OutputLoop:
for {
select {
case output := <-stderrCh:
if output != "" {
ui.Message(r.cleanOutputLine(output))
}
case output := <-stdoutCh:
if output != "" {
ui.Message(r.cleanOutputLine(output))
}
case <-exitCh:
break OutputLoop
}
select {
case <-ctx.Done():
return ctx.Err()
case <-r.exitCh:
return nil
}
// Make sure we finish off stdout/stderr because we may have gotten
// a message from the exit channel before finishing these first.
for output := range stdoutCh {
ui.Message(r.cleanOutputLine(output))
}
for output := range stderrCh {
ui.Message(r.cleanOutputLine(output))
}
return nil
}
// SetExited is a helper for setting that this process is exited. This
// should be called by communicators who are running a remote command in
// order to set that the command is done.
func (r *RemoteCmd) SetExited(status int) {
r.initchan()
r.Lock()
defer r.Unlock()
r.exitStatus = status
r.Unlock()
if r.exitCh == nil {
r.exitCh = make(chan struct{})
}
r.Exited = true
r.ExitStatus = status
close(r.exitCh)
}
// Wait waits for the remote command to complete.
func (r *RemoteCmd) Wait() {
// Make sure our condition variable is initialized.
r.Lock()
if r.exitCh == nil {
r.exitCh = make(chan struct{})
}
r.Unlock()
// Wait for command exit and return exit status
func (r *RemoteCmd) Wait() int {
r.initchan()
<-r.exitCh
r.Lock()
defer r.Unlock()
return r.exitStatus
}
// cleanOutputLine cleans up a line so that '\r' don't muck up the
// UI output when we're reading from a remote command.
func (r *RemoteCmd) cleanOutputLine(line string) string {
// Trim surrounding whitespace
line = strings.TrimRightFunc(line, unicode.IsSpace)
// Trim up to the first carriage return, since that text would be
// lost anyways.
idx := strings.LastIndex(line, "\r")
if idx > -1 {
line = line[idx+1:]
}
return line
func (r *RemoteCmd) ExitStatus() int {
return r.Wait()
}
func (r *RemoteCmd) initchan() {
r.exitChInit.Do(func() {
if r.exitCh == nil {
r.exitCh = make(chan interface{})
}
})
}

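The RemoteCmd rework above replaces the exported Exited/ExitStatus fields with a blocking Wait()/ExitStatus() pair and threads a context through Start. A caller-side sketch (runRemote is a made-up helper, not PR code):

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/packer/packer"
)

// runRemote shows the caller side of the new API: Start takes a context,
// and Wait blocks until the communicator calls SetExited, then returns the
// exit status. ExitStatus() is now a blocking accessor, not a struct field.
func runRemote(comm packer.Communicator) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cmd := &packer.RemoteCmd{Command: "uname -a"}
	if err := comm.Start(ctx, cmd); err != nil {
		return err
	}
	// Assumes the communicator calls SetExited when the command ends or is
	// killed by ctx cancellation, so Wait does not block forever.
	if status := cmd.Wait(); status != 0 {
		return fmt.Errorf("command exited with status %d", status)
	}
	return nil
}
```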

@ -2,8 +2,10 @@ package packer
import (
"bytes"
"context"
"io"
"os"
"strings"
"sync"
)
@ -34,7 +36,7 @@ type MockCommunicator struct {
DownloadData string
}
func (c *MockCommunicator) Start(rc *RemoteCmd) error {
func (c *MockCommunicator) Start(ctx context.Context, rc *RemoteCmd) error {
c.StartCalled = true
c.StartCmd = rc
@ -43,7 +45,7 @@ func (c *MockCommunicator) Start(rc *RemoteCmd) error {
if rc.Stdout != nil && c.StartStdout != "" {
wg.Add(1)
go func() {
rc.Stdout.Write([]byte(c.StartStdout))
io.Copy(rc.Stdout, strings.NewReader(c.StartStdout))
wg.Done()
}()
}
@ -51,7 +53,7 @@ func (c *MockCommunicator) Start(rc *RemoteCmd) error {
if rc.Stderr != nil && c.StartStderr != "" {
wg.Add(1)
go func() {
rc.Stderr.Write([]byte(c.StartStderr))
io.Copy(rc.Stderr, strings.NewReader(c.StartStderr))
wg.Done()
}()
}


@ -2,44 +2,70 @@ package packer
import (
"bytes"
"context"
"io"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/mitchellh/iochan"
"golang.org/x/sync/errgroup"
)
func TestRemoteCmd_StartWithUi(t *testing.T) {
data := "hello\nworld\nthere"
data := []string{
"hello",
"world",
"foo",
"there",
}
originalOutput := new(bytes.Buffer)
uiOutput := new(bytes.Buffer)
originalOutputReader, originalOutputWriter := io.Pipe()
uilOutputReader, uilOutputWriter := io.Pipe()
testComm := new(MockCommunicator)
testComm.StartStdout = data
testComm.StartStdout = strings.Join(data, "\n") + "\n"
testUi := &BasicUi{
Reader: new(bytes.Buffer),
Writer: uiOutput,
Writer: uilOutputWriter,
}
rc := &RemoteCmd{
Command: "test",
Stdout: originalOutput,
Stdout: originalOutputWriter,
}
ctx := context.TODO()
wg := errgroup.Group{}
testPrintFn := func(in io.Reader, expected []string) error {
i := 0
got := []string{}
for output := range iochan.LineReader(in) {
got = append(got, output)
i++
if i == len(expected) {
// here ideally the LineReader chan should be closed, but since
// the stream virtually has no ending we need to leave early.
break
}
}
if diff := cmp.Diff(got, expected); diff != "" {
t.Fatalf("bad output: %s", diff)
}
return nil
}
err := rc.StartWithUi(testComm, testUi)
wg.Go(func() error { return testPrintFn(uilOutputReader, data) })
wg.Go(func() error { return testPrintFn(originalOutputReader, data) })
err := rc.RunWithUi(ctx, testComm, testUi)
if err != nil {
t.Fatalf("err: %s", err)
}
rc.Wait()
expected := strings.TrimSpace(data)
if strings.TrimSpace(uiOutput.String()) != expected {
t.Fatalf("bad output: '%s'", uiOutput.String())
}
if originalOutput.String() != expected {
t.Fatalf("bad: %#v", originalOutput.String())
}
wg.Wait()
}
func TestRemoteCmd_Wait(t *testing.T) {


@ -168,6 +168,11 @@ func (c *Core) Build(n string) (Build, error) {
PauseBefore: rawP.PauseBefore,
Provisioner: provisioner,
}
} else if rawP.Timeout > 0 {
provisioner = &TimeoutProvisioner{
Timeout: rawP.Timeout,
Provisioner: provisioner,
}
}
provisioners = append(provisioners, coreBuildProvisioner{


@ -16,10 +16,10 @@ const HookProvision = "packer_provision"
// in. In addition to that, the Hook is given access to a UI so that it can
// output things to the user.
//
// Cancel is called when the hook needs to be cancelled. This will usually
// The first context argument controls cancellation; cancellation will usually
// be requested while Run is still in progress, so the mechanism that handles this
// must be race-free. Cancel should attempt to cancel the hook in the
// quickest, safest way possible.
// must be race-free. Cancel should attempt to cancel the hook in the quickest,
// safest way possible.
type Hook interface {
Run(context.Context, string, Ui, Communicator, interface{}) error
}


@ -0,0 +1,43 @@
package packer
import (
"context"
"fmt"
"time"
)
// TimeoutProvisioner is a Provisioner implementation that can timeout after a
// duration
type TimeoutProvisioner struct {
Provisioner
Timeout time.Duration
}
func (p *TimeoutProvisioner) Provision(ctx context.Context, ui Ui, comm Communicator) error {
ctx, cancel := context.WithTimeout(ctx, p.Timeout)
defer cancel()
// Use a select to determine if we get cancelled during the wait
ui.Say(fmt.Sprintf("Setting a %s timeout for the next provisioner...", p.Timeout))
errC := make(chan interface{})
go func() {
select {
case <-errC:
// all good
case <-ctx.Done():
switch ctx.Err() {
case context.DeadlineExceeded:
ui.Error("Cancelling provisioner after a timeout...")
default:
// the context also gets cancelled when the provisioner is
// successful
}
}
}()
err := p.Provisioner.Provision(ctx, ui, comm)
close(errC)
return err
}
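For illustration, a sketch of how this wrapper behaves at runtime, using the sleep provisioner added later in this changeset as the inner provisioner; Core.Build applies the same kind of wrapping when a template sets a timeout. The import paths and the BasicUi wiring are assumptions, not code from this changeset.

package main

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "time"

    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/provisioner/sleep"
)

func main() {
    ui := &packer.BasicUi{Reader: new(bytes.Buffer), Writer: os.Stdout, ErrorWriter: os.Stderr}

    // The inner provisioner would sleep for 10s; the wrapper's 50ms deadline
    // cancels the context first, so Provision returns early with an error.
    inner := &sleep.Provisioner{Duration: 10 * time.Second}
    wrapped := &packer.TimeoutProvisioner{Timeout: 50 * time.Millisecond, Provisioner: inner}

    err := wrapped.Provision(context.Background(), ui, nil)
    fmt.Println(err) // expected: context deadline exceeded
}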

View File

@ -1,6 +1,7 @@
package rpc
import (
"context"
"encoding/gob"
"io"
"log"
@ -64,7 +65,7 @@ func Communicator(client *rpc.Client) *communicator {
return &communicator{client: client}
}
func (c *communicator) Start(cmd *packer.RemoteCmd) (err error) {
func (c *communicator) Start(ctx context.Context, cmd *packer.RemoteCmd) (err error) {
var args CommunicatorStartArgs
args.Command = cmd.Command
@ -201,6 +202,8 @@ func (c *communicator) Download(path string, w io.Writer) (err error) {
}
func (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface{}) error {
ctx := context.TODO()
// Build the RemoteCmd on this side so that it all pipes over
// to the remote side.
var cmd packer.RemoteCmd
@ -260,7 +263,7 @@ func (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface
responseWriter := gob.NewEncoder(responseC)
// Start the actual command
err = c.c.Start(&cmd)
err = c.c.Start(ctx, &cmd)
if err != nil {
close(doneCh)
return NewBasicError(err)
@ -272,8 +275,8 @@ func (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface
defer close(doneCh)
defer responseC.Close()
cmd.Wait()
log.Printf("[INFO] RPC endpoint: Communicator ended with: %d", cmd.ExitStatus)
responseWriter.Encode(&CommandFinished{cmd.ExitStatus})
log.Printf("[INFO] RPC endpoint: Communicator ended with: %d", cmd.ExitStatus())
responseWriter.Encode(&CommandFinished{cmd.ExitStatus()})
}()
return nil

View File

@ -2,6 +2,7 @@ package rpc
import (
"bufio"
"context"
"io"
"reflect"
"testing"
@ -36,8 +37,10 @@ func TestCommunicatorRPC(t *testing.T) {
c.StartStderr = "errfoo\n"
c.StartExitStatus = 42
ctx := context.Background()
// Test Start
err := remote.Start(&cmd)
err := remote.Start(ctx, &cmd)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -73,8 +76,8 @@ func TestCommunicatorRPC(t *testing.T) {
}
// Test that we can get the exit status properly
if cmd.ExitStatus != 42 {
t.Fatalf("bad exit: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 42 {
t.Fatalf("bad exit: %d", cmd.ExitStatus())
}
// Test that we can upload things

View File

@ -41,7 +41,7 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, artifact
// this particular post-processor doesn't do anything with the artifact
// except to return it.
success, retErr := sl.Run(ui, &p.config)
success, retErr := sl.Run(ctx, ui, &p.config)
if !success {
return nil, false, false, retErr
}

View File

@ -4,8 +4,9 @@ import (
"context"
"fmt"
"log"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
@ -25,23 +26,27 @@ func (s *stepUpload) Run(ctx context.Context, state multistep.StateBag) multiste
"Depending on your internet connection and the size of the box,\n" +
"this may take some time")
err := common.Retry(10, 10, 3, func(i uint) (bool, error) {
ui.Message(fmt.Sprintf("Uploading box, attempt %d", i+1))
err := retry.Config{
Tries: 3,
RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 10 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
ui.Message(fmt.Sprintf("Uploading box"))
resp, err := client.Upload(artifactFilePath, url)
if err != nil {
ui.Message(fmt.Sprintf(
"Error uploading box! Will retry in 10 seconds. Error: %s", err))
return false, nil
return err
}
if resp.StatusCode != 200 {
log.Printf("bad HTTP status: %d", resp.StatusCode)
err := fmt.Errorf("bad HTTP status: %d", resp.StatusCode)
log.Print(err)
ui.Message(fmt.Sprintf(
"Error uploading box! Will retry in 10 seconds. Status: %d",
resp.StatusCode))
return false, nil
return err
}
return true, nil
return err
})
if err != nil {
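To make the shape of the new retry helper explicit, a self-contained sketch of retry.Config as used above: Tries bounds the number of attempts, RetryDelay supplies the wait between them, and a nil error from the operation ends the retries. The millisecond delays are illustrative values, and the behavior described is inferred from the call sites in this changeset rather than from the package itself.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/hashicorp/packer/common/retry"
)

func main() {
    attempts := 0
    err := retry.Config{
        Tries: 3,
        RetryDelay: (&retry.Backoff{
            InitialBackoff: 10 * time.Millisecond,
            MaxBackoff:     40 * time.Millisecond,
            Multiplier:     2,
        }).Linear,
    }.Run(context.Background(), func(ctx context.Context) error {
        attempts++
        if attempts < 3 {
            return errors.New("transient failure") // retried after a delay
        }
        return nil // success ends the retry loop
    })
    fmt.Printf("attempts=%d err=%v\n", attempts, err)
}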

View File

@ -1,6 +1,7 @@
package ansiblelocal
import (
"context"
"io"
"os"
@ -12,7 +13,7 @@ type communicatorMock struct {
uploadDestination []string
}
func (c *communicatorMock) Start(cmd *packer.RemoteCmd) error {
func (c *communicatorMock) Start(ctx context.Context, cmd *packer.RemoteCmd) error {
c.startCommand = append(c.startCommand, cmd.Command)
cmd.SetExited(0)
return nil

View File

@ -348,6 +348,7 @@ func (p *Provisioner) provisionPlaybookFile(ui packer.Ui, comm packer.Communicat
}
func (p *Provisioner) executeGalaxy(ui packer.Ui, comm packer.Communicator) error {
ctx := context.TODO()
rolesDir := filepath.ToSlash(filepath.Join(p.config.StagingDir, "roles"))
galaxyFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.GalaxyFile)))
@ -358,12 +359,12 @@ func (p *Provisioner) executeGalaxy(ui packer.Ui, comm packer.Communicator) erro
cmd := &packer.RemoteCmd{
Command: command,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
// ansible-galaxy version 2.0.0.2 doesn't return exit codes on error..
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus())
}
return nil
}
@ -403,6 +404,7 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err
func (p *Provisioner) executeAnsiblePlaybook(
ui packer.Ui, comm packer.Communicator, playbookFile, extraArgs, inventory string,
) error {
ctx := context.TODO()
command := fmt.Sprintf("cd %s && %s %s%s -c local -i %s",
p.config.StagingDir, p.config.Command, playbookFile, extraArgs, inventory,
)
@ -410,17 +412,17 @@ func (p *Provisioner) executeAnsiblePlaybook(
cmd := &packer.RemoteCmd{
Command: command,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus == 127 {
if cmd.ExitStatus() != 0 {
if cmd.ExitStatus() == 127 {
return fmt.Errorf("%s could not be found. Verify that it is available on the\n"+
"PATH after connecting to the machine.",
p.config.Command)
}
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus())
}
return nil
}
@ -464,32 +466,34 @@ func (p *Provisioner) uploadFile(ui packer.Ui, comm packer.Communicator, dst, sr
}
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("mkdir -p '%s'", dir),
}
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more information.")
}
return nil
}
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("rm -rf '%s'", dir),
}
ui.Message(fmt.Sprintf("Removing directory: %s", dir))
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more information.")
}
return nil

View File

@ -467,22 +467,23 @@ func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string
}
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
// Chmod the directory to 0777 just so that we can access it as our user
cmd = &packer.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
@ -514,6 +515,7 @@ func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node str
"-y",
"-c", knifeConfigPath,
}
ctx := context.TODO()
p.config.ctx.Data = &KnifeTemplate{
Sudo: !p.config.PreventSudo,
@ -527,10 +529,10 @@ func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node str
}
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Non-zero exit status. See output above for more info.\n\n"+
"Command: %s",
@ -542,9 +544,10 @@ func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node str
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Removing directory: %s", dir))
ctx := context.TODO()
cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
@ -557,6 +560,8 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config
JsonPath: json,
Sudo: !p.config.PreventSudo,
}
ctx := context.TODO()
command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
if err != nil {
return err
@ -575,12 +580,12 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config
Command: command,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus())
}
return nil
@ -588,6 +593,7 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
ui.Message("Installing Chef...")
ctx := context.TODO()
p.config.ctx.Data = &InstallChefTemplate{
Sudo: !p.config.PreventSudo,
@ -600,13 +606,13 @@ func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error
ui.Message(command)
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Install script exited with non-zero exit status %d", cmd.ExitStatus)
"Install script exited with non-zero exit status %d", cmd.ExitStatus())
}
return nil

View File

@ -410,21 +410,22 @@ func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
ctx := context.TODO()
cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
// Chmod the directory to 0777 just so that we can access it as our user
cmd = &packer.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
@ -447,13 +448,13 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config
cmd := &packer.RemoteCmd{
Command: command,
}
if err := cmd.StartWithUi(comm, ui); err != nil {
ctx := context.TODO()
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus())
}
return nil
@ -461,6 +462,7 @@ func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator, version string) error {
ui.Message("Installing Chef...")
ctx := context.TODO()
p.config.ctx.Data = &InstallChefTemplate{
Sudo: !p.config.PreventSudo,
@ -472,13 +474,13 @@ func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator, versio
}
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Install script exited with non-zero exit status %d", cmd.ExitStatus)
"Install script exited with non-zero exit status %d", cmd.ExitStatus())
}
return nil

View File

@ -128,6 +128,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
func (p *Provisioner) maybeBootstrap(ui packer.Ui, comm packer.Communicator) error {
ctx := context.TODO()
if !p.config.Bootstrap {
return nil
}
@ -153,12 +154,12 @@ func (p *Provisioner) maybeBootstrap(ui packer.Ui, comm packer.Communicator) err
Stderr: &outErr,
}
if err = comm.Start(cmd); err != nil {
if err = comm.Start(ctx, cmd); err != nil {
return fmt.Errorf("Error bootstrapping converge: %s", err)
}
cmd.Wait()
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
ui.Error(out.String())
ui.Error(outErr.String())
return errors.New("Error bootstrapping converge")
@ -180,6 +181,7 @@ func (p *Provisioner) sendModuleDirectories(ui packer.Ui, comm packer.Communicat
}
func (p *Provisioner) applyModules(ui packer.Ui, comm packer.Communicator) error {
ctx := context.TODO()
// create params JSON file
params, err := json.Marshal(p.config.Params)
if err != nil {
@ -208,12 +210,12 @@ func (p *Provisioner) applyModules(ui packer.Ui, comm packer.Communicator) error
Stdout: &runOut,
Stderr: &runErr,
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
return fmt.Errorf("Error applying %q: %s", p.config.Module, err)
}
cmd.Wait()
if cmd.ExitStatus == 127 {
if cmd.ExitStatus() == 127 {
ui.Error("Could not find Converge. Is it installed and in PATH?")
if !p.config.Bootstrap {
ui.Error("Bootstrapping was disabled for this run. That might be why Converge isn't present.")
@ -221,10 +223,10 @@ func (p *Provisioner) applyModules(ui packer.Ui, comm packer.Communicator) error
return errors.New("Could not find Converge")
} else if cmd.ExitStatus != 0 {
} else if cmd.ExitStatus() != 0 {
ui.Error(strings.TrimSpace(runOut.String()))
ui.Error(strings.TrimSpace(runErr.String()))
ui.Error(fmt.Sprintf("Exited with error code %d.", cmd.ExitStatus))
ui.Error(fmt.Sprintf("Exited with error code %d.", cmd.ExitStatus()))
return fmt.Errorf("Error applying %q", p.config.Module)
}

View File

@ -14,6 +14,7 @@ import (
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/common/shell"
"github.com/hashicorp/packer/common/uuid"
commonhelper "github.com/hashicorp/packer/helper/common"
@ -253,7 +254,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// that the upload succeeded, a restart is initiated, and then the
// command is executed but the file doesn't exist any longer.
var cmd *packer.RemoteCmd
err = p.retryable(func() error {
retry.Config{StartTimeout: p.config.StartRetryTimeout}.Run(ctx, func(ctx context.Context) error {
if _, err := f.Seek(0, 0); err != nil {
return err
}
@ -262,7 +263,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
cmd = &packer.RemoteCmd{Command: command}
return cmd.StartWithUi(comm, ui)
return cmd.RunWithUi(ctx, comm, ui)
})
if err != nil {
return err
@ -271,7 +272,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// Close the original file since we copied it
f.Close()
if err := p.config.ValidExitCode(cmd.ExitStatus); err != nil {
if err := p.config.ValidExitCode(cmd.ExitStatus()); err != nil {
return err
}
}
@ -285,31 +286,6 @@ func (p *Provisioner) Cancel() {
os.Exit(0)
}
// retryable will retry the given function over and over until a non-error is
// returned.
func (p *Provisioner) retryable(f func() error) error {
startTimeout := time.After(p.config.StartRetryTimeout)
for {
var err error
if err = f(); err == nil {
return nil
}
// Create an error and log it
err = fmt.Errorf("Retryable error: %s", err)
log.Print(err.Error())
// Check if we timed out, otherwise we retry. It is safe to retry
// since the only error case above is if the command failed to START.
select {
case <-startTimeout:
return err
default:
time.Sleep(retryableSleep)
}
}
}
// Environment variables required within the remote environment are uploaded
// within a PS script and then enabled by 'dot sourcing' the script
// immediately prior to execution of the main command
@ -387,13 +363,14 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) {
}
func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (err error) {
ctx := context.TODO()
// Upload all env vars to a powershell script on the target build file
// system. Do this in the context of a single retryable function so that
// we gracefully handle any errors created by transient conditions such as
// a system restart
envVarReader := strings.NewReader(flattenedEnvVars)
log.Printf("Uploading env vars to %s", p.config.RemoteEnvVarPath)
err = p.retryable(func() error {
err = retry.Config{StartTimeout: p.config.StartRetryTimeout}.Run(ctx, func(context.Context) error {
if err := p.communicator.Upload(p.config.RemoteEnvVarPath, envVarReader, nil); err != nil {
return fmt.Errorf("Error uploading ps script containing env vars: %s", err)
}

View File

@ -3,14 +3,11 @@ package powershell
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"testing"
"time"
"github.com/hashicorp/packer/packer"
)
@ -643,36 +640,6 @@ func TestProvision_uploadEnvVars(t *testing.T) {
}
}
func TestRetryable(t *testing.T) {
config := testConfig()
count := 0
retryMe := func() error {
t.Logf("RetryMe, attempt number %d", count)
if count == 2 {
return nil
}
count++
return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count))
}
retryableSleep = 50 * time.Millisecond
p := new(Provisioner)
p.config.StartRetryTimeout = 155 * time.Millisecond
err := p.Prepare(config)
err = p.retryable(retryMe)
if err != nil {
t.Fatalf("should not have error retrying function")
}
count = 0
p.config.StartRetryTimeout = 10 * time.Millisecond
err = p.Prepare(config)
err = p.retryable(retryMe)
if err == nil {
t.Fatalf("should have error retrying function")
}
}
func TestCancel(t *testing.T) {
// Don't actually call Cancel() as it performs an os.Exit(0)
// which kills the 'go test' tool

View File

@ -348,12 +348,12 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
ui.Message(fmt.Sprintf("Running Puppet: %s", command))
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return fmt.Errorf("Got an error starting command: %s", err)
}
if cmd.ExitStatus != 0 && cmd.ExitStatus != 2 && !p.config.IgnoreExitCodes {
return fmt.Errorf("Puppet exited with a non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 && cmd.ExitStatus() != 2 && !p.config.IgnoreExitCodes {
return fmt.Errorf("Puppet exited with a non-zero exit status: %d", cmd.ExitStatus())
}
if p.config.CleanStagingDir {
@ -431,21 +431,22 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
ctx := context.TODO()
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}
// Chmod the directory to 0777 just so that we can access it as our user
cmd = &packer.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
@ -453,12 +454,14 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri
}
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}

View File

@ -301,12 +301,12 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
ui.Message(fmt.Sprintf("Running Puppet: %s", command))
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 && cmd.ExitStatus != 2 && !p.config.IgnoreExitCodes {
return fmt.Errorf("Puppet exited with a non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 && cmd.ExitStatus() != 2 && !p.config.IgnoreExitCodes {
return fmt.Errorf("Puppet exited with a non-zero exit status: %d", cmd.ExitStatus())
}
if p.config.CleanStagingDir {
@ -320,21 +320,22 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ui.Message(fmt.Sprintf("Creating directory: %s", dir))
ctx := context.TODO()
cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
// Chmod the directory to 0777 just so that we can access it as our user
cmd = &packer.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status. See output above for more info.")
}
@ -342,12 +343,14 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri
}
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}

View File

@ -231,14 +231,14 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
Command: fmt.Sprintf(p.guestOSTypeConfig.bootstrapFetchCmd),
}
ui.Message(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh"))
if err = cmd.StartWithUi(comm, ui); err != nil {
if err = cmd.RunWithUi(ctx, comm, ui); err != nil {
return fmt.Errorf("Unable to download Salt: %s", err)
}
cmd = &packer.RemoteCmd{
Command: fmt.Sprintf("%s %s", p.sudo(p.guestOSTypeConfig.bootstrapRunCmd), p.config.BootstrapArgs),
}
ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd.Command))
if err = cmd.StartWithUi(comm, ui); err != nil {
if err = cmd.RunWithUi(ctx, comm, ui); err != nil {
return fmt.Errorf("Unable to install Salt: %s", err)
}
}
@ -342,9 +342,9 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
ui.Message(fmt.Sprintf("Running: salt-call --local %s", p.config.CmdArgs))
cmd := &packer.RemoteCmd{Command: p.sudo(fmt.Sprintf("%s --local %s", filepath.Join(p.config.SaltBinDir, "salt-call"), p.config.CmdArgs))}
if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
if err = cmd.RunWithUi(ctx, comm, ui); err != nil || cmd.ExitStatus() != 0 {
if err == nil {
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus())
}
return fmt.Errorf("Error executing salt-call: %s", err)
@ -406,13 +406,15 @@ func (p *Provisioner) uploadFile(ui packer.Ui, comm packer.Communicator, dst, sr
}
func (p *Provisioner) moveFile(ui packer.Ui, comm packer.Communicator, dst string, src string) error {
ctx := context.TODO()
ui.Message(fmt.Sprintf("Moving %s to %s", src, dst))
cmd := &packer.RemoteCmd{
Command: p.sudo(p.guestCommands.MovePath(src, dst)),
}
if err := cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil || cmd.ExitStatus() != 0 {
if err == nil {
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus())
}
return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err)
@ -425,38 +427,41 @@ func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir stri
cmd := &packer.RemoteCmd{
Command: p.guestCommands.CreateDir(dir),
}
if err := cmd.StartWithUi(comm, ui); err != nil {
ctx := context.TODO()
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}
return nil
}
func (p *Provisioner) statPath(ui packer.Ui, comm packer.Communicator, path string) error {
ctx := context.TODO()
ui.Message(fmt.Sprintf("Verifying Path: %s", path))
cmd := &packer.RemoteCmd{
Command: p.guestCommands.StatPath(path),
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}
return nil
}
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
ctx := context.TODO()
ui.Message(fmt.Sprintf("Removing directory: %s", dir))
cmd := &packer.RemoteCmd{
Command: p.guestCommands.RemoveDir(dir),
}
if err := cmd.StartWithUi(comm, ui); err != nil {
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf("Non-zero exit status.")
}
return nil

View File

@ -26,7 +26,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error {
}
func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, _ packer.Communicator) error {
_, retErr := sl.Run(ui, &p.config)
_, retErr := sl.Run(ctx, ui, &p.config)
if retErr != nil {
return retErr
}

View File

@ -16,6 +16,7 @@ import (
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/common/shell"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
@ -237,7 +238,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// upload the var file
var cmd *packer.RemoteCmd
err = p.retryable(func() error {
err = retry.Config{StartTimeout: p.config.startRetryTimeout}.Run(ctx, func(ctx context.Context) error {
if _, err := tf.Seek(0, 0); err != nil {
return err
}
@ -256,7 +257,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
cmd = &packer.RemoteCmd{
Command: fmt.Sprintf("chmod 0600 %s", remoteVFName),
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
return fmt.Errorf(
"Error chmodding script file to 0600 in remote "+
"machine: %s", err)
@ -297,7 +298,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// and then the command is executed but the file doesn't exist
// any longer.
var cmd *packer.RemoteCmd
err = p.retryable(func() error {
err = retry.Config{StartTimeout: p.config.startRetryTimeout}.Run(ctx, func(ctx context.Context) error {
if _, err := f.Seek(0, 0); err != nil {
return err
}
@ -314,7 +315,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
cmd = &packer.RemoteCmd{
Command: fmt.Sprintf("chmod 0755 %s", p.config.RemotePath),
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
return fmt.Errorf(
"Error chmodding script file to 0755 in remote "+
"machine: %s", err)
@ -322,7 +323,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
cmd.Wait()
cmd = &packer.RemoteCmd{Command: command}
return cmd.StartWithUi(comm, ui)
return cmd.RunWithUi(ctx, comm, ui)
})
if err != nil {
@ -331,7 +332,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// If the exit code indicates a remote disconnect, fail unless
// we were expecting it.
if cmd.ExitStatus == packer.CmdDisconnect {
if cmd.ExitStatus() == packer.CmdDisconnect {
if !p.config.ExpectDisconnect {
return fmt.Errorf("Script disconnected unexpectedly. " +
"If you expected your script to disconnect, i.e. from a " +
@ -339,7 +340,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
"or `\"valid_exit_codes\": [0, 2300218]` to the shell " +
"provisioner parameters.")
}
} else if err := p.config.ValidExitCode(cmd.ExitStatus); err != nil {
} else if err := p.config.ValidExitCode(cmd.ExitStatus()); err != nil {
return err
}
@ -371,21 +372,22 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
func (p *Provisioner) cleanupRemoteFile(path string, comm packer.Communicator) error {
err := p.retryable(func() error {
ctx := context.TODO()
err := retry.Config{StartTimeout: p.config.startRetryTimeout}.Run(ctx, func(ctx context.Context) error {
cmd := &packer.RemoteCmd{
Command: fmt.Sprintf("rm -f %s", path),
}
if err := comm.Start(cmd); err != nil {
if err := comm.Start(ctx, cmd); err != nil {
return fmt.Errorf(
"Error removing temporary script at %s: %s",
path, err)
}
cmd.Wait()
// treat disconnects as retryable by returning an error
if cmd.ExitStatus == packer.CmdDisconnect {
if cmd.ExitStatus() == packer.CmdDisconnect {
return fmt.Errorf("Disconnect while removing temporary script.")
}
if cmd.ExitStatus != 0 {
if cmd.ExitStatus() != 0 {
return fmt.Errorf(
"Error removing temporary script at %s!",
path)
@ -400,32 +402,6 @@ func (p *Provisioner) cleanupRemoteFile(path string, comm packer.Communicator) e
return nil
}
// retryable will retry the given function over and over until a
// non-error is returned.
func (p *Provisioner) retryable(f func() error) error {
startTimeout := time.After(p.config.startRetryTimeout)
for {
var err error
if err = f(); err == nil {
return nil
}
// Create an error and log it
err = fmt.Errorf("Retryable error: %s", err)
log.Print(err.Error())
// Check if we timed out, otherwise we retry. It is safe to
// retry since the only error case above is if the command
// failed to START.
select {
case <-startTimeout:
return err
default:
time.Sleep(2 * time.Second)
}
}
}
func (p *Provisioner) escapeEnvVars() ([]string, map[string]string) {
envVars := make(map[string]string)

View File

@ -0,0 +1,30 @@
package sleep
import (
"context"
"time"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
)
type Provisioner struct {
Duration time.Duration
}
var _ packer.Provisioner = new(Provisioner)
func (p *Provisioner) Prepare(raws ...interface{}) error {
return config.Decode(&p, &config.DecodeOpts{}, raws...)
}
func (p *Provisioner) Provision(ctx context.Context, _ packer.Ui, _ packer.Communicator) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(p.Duration):
return nil
}
}
func (p *Provisioner) Cancel() {}

View File

@ -0,0 +1,56 @@
package sleep
import (
"context"
"testing"
"time"
)
func test1sConfig() map[string]interface{} {
return map[string]interface{}{
"duration": "1s",
}
}
func TestConfigPrepare_1s(t *testing.T) {
raw := test1sConfig()
var p Provisioner
err := p.Prepare(raw)
if err != nil {
t.Fatalf("prerare failed: %v", err)
}
if p.Duration != time.Second {
t.Fatal("wrong duration")
}
}
func TestProvisioner_Provision(t *testing.T) {
ctxCancelled, cancel := context.WithCancel(context.Background())
cancel()
type fields struct {
Duration time.Duration
}
type args struct {
ctx context.Context
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{"valid sleep", fields{time.Millisecond}, args{context.Background()}, false},
{"timeout", fields{time.Millisecond}, args{ctxCancelled}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Provisioner{
Duration: tt.fields.Duration,
}
if err := p.Provision(tt.args.ctx, nil, nil); (err != nil) != tt.wantErr {
t.Errorf("Provisioner.Provision() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
@ -104,17 +105,17 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
var cmd *packer.RemoteCmd
command := p.config.RestartCommand
err := p.retryable(func() error {
err := retry.Config{StartTimeout: p.config.RestartTimeout}.Run(ctx, func(context.Context) error {
cmd = &packer.RemoteCmd{Command: command}
return cmd.StartWithUi(comm, ui)
return cmd.RunWithUi(ctx, comm, ui)
})
if err != nil {
return err
}
if cmd.ExitStatus != 0 && cmd.ExitStatus != 1115 && cmd.ExitStatus != 1190 {
return fmt.Errorf("Restart script exited with non-zero exit status: %d", cmd.ExitStatus)
if cmd.ExitStatus() != 0 && cmd.ExitStatus() != 1115 && cmd.ExitStatus() != 1190 {
return fmt.Errorf("Restart script exited with non-zero exit status: %d", cmd.ExitStatus())
}
return waitForRestart(ctx, p, comm)
@ -141,25 +142,25 @@ var waitForRestart = func(ctx context.Context, p *Provisioner, comm packer.Commu
for {
log.Printf("Check if machine is rebooting...")
cmd = &packer.RemoteCmd{Command: trycommand}
err = cmd.StartWithUi(comm, ui)
err = cmd.RunWithUi(ctx, comm, ui)
if err != nil {
// Couldn't execute, we assume machine is rebooting already
break
}
if cmd.ExitStatus == 1 {
if cmd.ExitStatus() == 1 {
// SSH provisioner, and we're already rebooting. SSH can reconnect
// without our help; exit this wait loop.
break
}
if cmd.ExitStatus == 1115 || cmd.ExitStatus == 1190 || cmd.ExitStatus == 1717 {
if cmd.ExitStatus() == 1115 || cmd.ExitStatus() == 1190 || cmd.ExitStatus() == 1717 {
// Reboot already in progress but not completed
log.Printf("Reboot already in progress, waiting...")
time.Sleep(10 * time.Second)
}
if cmd.ExitStatus == 0 {
if cmd.ExitStatus() == 0 {
// Cancel reboot we created to test if machine was already rebooting
cmd = &packer.RemoteCmd{Command: abortcommand}
cmd.StartWithUi(comm, ui)
cmd.RunWithUi(ctx, comm, ui)
break
}
}
@ -221,7 +222,7 @@ var waitForCommunicator = func(ctx context.Context, p *Provisioner) error {
}
if runCustomRestartCheck {
// run user-configured restart check
err := cmdRestartCheck.StartWithUi(p.comm, p.ui)
err := cmdRestartCheck.RunWithUi(ctx, p.comm, p.ui)
if err != nil {
log.Printf("Communication connection err: %s", err)
continue
@ -243,7 +244,7 @@ var waitForCommunicator = func(ctx context.Context, p *Provisioner) error {
cmdModuleLoad.Stdout = &buf
cmdModuleLoad.Stdout = io.MultiWriter(cmdModuleLoad.Stdout, &buf2)
cmdModuleLoad.StartWithUi(p.comm, p.ui)
cmdModuleLoad.RunWithUi(ctx, p.comm, p.ui)
stdoutToRead := buf2.String()
if !strings.Contains(stdoutToRead, "restarted.") {
@ -262,7 +263,7 @@ var waitForCommunicator = func(ctx context.Context, p *Provisioner) error {
cmdKeyCheck.Stdout = &buf
cmdKeyCheck.Stdout = io.MultiWriter(cmdKeyCheck.Stdout, &buf2)
err := p.comm.Start(cmdKeyCheck)
err := p.comm.Start(ctx, cmdKeyCheck)
if err != nil {
log.Printf("Communication connection err: %s", err)
shouldContinue = true
@ -286,29 +287,3 @@ var waitForCommunicator = func(ctx context.Context, p *Provisioner) error {
return nil
}
// retryable will retry the given function over and over until a
// non-error is returned.
func (p *Provisioner) retryable(f func() error) error {
startTimeout := time.After(p.config.RestartTimeout)
for {
var err error
if err = f(); err == nil {
return nil
}
// Create an error and log it
err = fmt.Errorf("Retryable error: %s", err)
log.Print(err.Error())
// Check if we timed out, otherwise we retry. It is safe to
// retry since the only error case above is if the command
// failed to START.
select {
case <-startTimeout:
return err
default:
time.Sleep(retryableSleep)
}
}
}

View File

@ -3,7 +3,6 @@ package restart
import (
"bytes"
"context"
"errors"
"fmt"
"testing"
"time"
@ -305,36 +304,6 @@ func TestProvision_waitForCommunicatorWithCancel(t *testing.T) {
}
}
func TestRetryable(t *testing.T) {
config := testConfig()
count := 0
retryMe := func() error {
t.Logf("RetryMe, attempt number %d", count)
if count == 2 {
return nil
}
count++
return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count))
}
retryableSleep = 50 * time.Millisecond
p := new(Provisioner)
p.config.RestartTimeout = 155 * time.Millisecond
err := p.Prepare(config)
err = p.retryable(retryMe)
if err != nil {
t.Fatalf("should not have error retrying function")
}
count = 0
p.config.RestartTimeout = 10 * time.Millisecond
err = p.Prepare(config)
err = p.retryable(retryMe)
if err == nil {
t.Fatalf("should have error retrying function")
}
}
func TestProvision_Cancel(t *testing.T) {
config := testConfig()

View File

@ -14,6 +14,7 @@ import (
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
"github.com/hashicorp/packer/common/shell"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
@ -204,7 +205,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// and then the command is executed but the file doesn't exist
// any longer.
var cmd *packer.RemoteCmd
err = p.retryable(func() error {
err = retry.Config{StartTimeout: p.config.StartRetryTimeout}.Run(ctx, func(ctx context.Context) error {
if _, err := f.Seek(0, 0); err != nil {
return err
}
@ -214,7 +215,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
}
cmd = &packer.RemoteCmd{Command: command}
return cmd.StartWithUi(comm, ui)
return cmd.RunWithUi(ctx, comm, ui)
})
if err != nil {
return err
@ -223,7 +224,7 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
// Close the original file since we copied it
f.Close()
if err := p.config.ValidExitCode(cmd.ExitStatus); err != nil {
if err := p.config.ValidExitCode(cmd.ExitStatus()); err != nil {
return err
}
}
@ -231,32 +232,6 @@ func (p *Provisioner) Provision(ctx context.Context, ui packer.Ui, comm packer.C
return nil
}
// retryable will retry the given function over and over until a
// non-error is returned.
func (p *Provisioner) retryable(f func() error) error {
startTimeout := time.After(p.config.StartRetryTimeout)
for {
var err error
if err = f(); err == nil {
return nil
}
// Create an error and log it
err = fmt.Errorf("Retryable error: %s", err)
log.Print(err.Error())
// Check if we timed out, otherwise we retry. It is safe to
// retry since the only error case above is if the command
// failed to START.
select {
case <-startTimeout:
return err
default:
time.Sleep(retryableSleep)
}
}
}
func (p *Provisioner) createFlattenedEnvVars() (flattened string) {
flattened = ""
envVars := make(map[string]string)

View File

@ -3,14 +3,11 @@ package shell
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"time"
"github.com/hashicorp/packer/packer"
)
@ -431,37 +428,6 @@ func TestProvisioner_createFlattenedEnvVars_windows(t *testing.T) {
}
}
}
func TestRetryable(t *testing.T) {
config := testConfig()
count := 0
retryMe := func() error {
log.Printf("RetryMe, attempt number %d", count)
if count == 2 {
return nil
}
count++
return errors.New(fmt.Sprintf("Still waiting %d more times...", 2-count))
}
retryableSleep = 50 * time.Millisecond
p := new(Provisioner)
p.config.StartRetryTimeout = 155 * time.Millisecond
err := p.Prepare(config)
err = p.retryable(retryMe)
if err != nil {
t.Fatalf("should not have error retrying function")
}
count = 0
p.config.StartRetryTimeout = 10 * time.Millisecond
err = p.Prepare(config)
err = p.retryable(retryMe)
if err == nil {
t.Fatalf("should have error retrying function")
}
}
func TestCancel(t *testing.T) {
// Don't actually call Cancel() as it performs an os.Exit(0)
// which kills the 'go test' tool

View File

@ -233,6 +233,7 @@ func (r *rawTemplate) Template() (*Template, error) {
delete(p.Config, "override")
delete(p.Config, "pause_before")
delete(p.Config, "type")
delete(p.Config, "timeout")
if len(p.Config) == 0 {
p.Config = nil

View File

@ -106,6 +106,19 @@ func TestParse(t *testing.T) {
false,
},
{
"parse-provisioner-timeout.json",
&Template{
Provisioners: []*Provisioner{
{
Type: "something",
Timeout: 5 * time.Minute,
},
},
},
false,
},
{
"parse-provisioner-only.json",
&Template{

View File

@ -148,6 +148,7 @@ type Provisioner struct {
Config map[string]interface{} `json:"config,omitempty"`
Override map[string]interface{} `json:"override,omitempty"`
PauseBefore time.Duration `mapstructure:"pause_before" json:"pause_before,omitempty"`
Timeout time.Duration `mapstructure:"timeout" json:"timeout,omitempty"`
}
// MarshalJSON conducts the necessary flattening of the Provisioner struct

View File

@ -18,6 +18,11 @@ func TestTemplateValidate(t *testing.T) {
File string
Err bool
}{
{
"validate-good-prov-timeout.json",
false,
},
{
"validate-no-builders.json",
true,

View File

@ -0,0 +1,8 @@
{
"provisioners": [
{
"type": "something",
"timeout": "5m"
}
]
}

View File

@ -0,0 +1,11 @@
{
"builders": [{
"type": "foo"
}],
"provisioners": [{
"timeout": "5m",
"type": "bar",
"only": ["foo"]
}]
}

vendor/github.com/mitchellh/iochan/go.mod (generated, vendored)
View File

@ -0,0 +1 @@
module github.com/mitchellh/iochan

View File

@ -39,3 +39,25 @@ func DelimReader(r io.Reader, delim byte) <-chan string {
return ch
}
// LineReader takes an io.Reader and produces the contents of the reader on the
// returned channel. Internally a bufio.Scanner with the bufio.ScanLines split
// function is used; it returns each line without its trailing end-of-line
// marker. Scan can panic if the split function returns too many empty tokens
// without advancing the input.
//
// The channel is closed when the end of the input is reached or when an
// error occurs.
func LineReader(r io.Reader) <-chan string {
ch := make(chan string)
go func() {
scanner := bufio.NewScanner(r)
defer close(ch)
for scanner.Scan() {
ch <- scanner.Text()
}
}()
return ch
}
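A brief usage sketch of the new LineReader helper; the import path follows the module path above and the sample input is illustrative.

package main

import (
    "fmt"
    "strings"

    "github.com/mitchellh/iochan"
)

func main() {
    r := strings.NewReader("hello\nworld\n")
    // Each line arrives without its end-of-line marker; the channel closes at EOF.
    for line := range iochan.LineReader(r) {
        fmt.Println(line)
    }
}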

View File

@ -2,7 +2,7 @@
description: |
The file Packer builder is not really a builder, it just creates an artifact
from a file. It can be used to debug post-processors without incurring high
wait times. It does not run any provisioners.
wait times.
layout: docs
page_title: 'File - Builders'
sidebar_current: 'docs-builders-file'
@ -14,7 +14,7 @@ Type: `file`
The `file` Packer builder is not really a builder, it just creates an artifact
from a file. It can be used to debug post-processors without incurring high
wait times. It does not run any provisioners.
wait times.
## Basic Example

View File

@ -155,6 +155,8 @@ chi-appservers
`staging_directory` will be removed after executing ansible. By default,
this is set to `false`.
<%= partial "partials/provisioners/common-config" %>
## Default Extra Variables
In addition to being able to specify extra arguments using the

View File

@ -143,6 +143,8 @@ Optional Parameters:
- `user` (string) - The `ansible_user` to use. Defaults to the user running
packer.
<%= partial "partials/provisioners/common-config" %>
## Default Extra Variables
In addition to being able to specify extra arguments using the

View File

@ -41,6 +41,8 @@ and between every provisioner.
breakpoints or label them with information about where in the build they
occur
<%= partial "partials/provisioners/common-config" %>
## Usage
Insert this provisioner wherever you want the build to pause. You'll see ui

View File

@ -143,6 +143,8 @@ configuration is actually required.
machine. If this is NOT set, then it is your responsibility via other means
(shell provisioner, etc.) to get a validation key to where Chef expects it.
<%= partial "partials/provisioners/common-config" %>
## Chef Configuration
By default, Packer uses a simple Chef configuration file in order to set the

View File

@ -111,6 +111,8 @@ configuration is actually required, but at least `run_list` is recommended.
- `version` (string) - The version of Chef to be installed. By default this
is empty which will install the latest version of Chef.
<%= partial "partials/provisioners/common-config" %>
## Chef Configuration
By default, Packer uses a simple Chef configuration file in order to set the

View File

@ -70,6 +70,8 @@ Optional parameters:
- `prevent_bootstrap_sudo` (boolean) - stop Converge from bootstrapping with
administrator privileges via sudo
<%= partial "partials/provisioners/common-config" %>
### Module Directories
The provisioner can transfer module directories to the remote host for

View File

@ -65,6 +65,9 @@ The available configuration options are listed below.
the Packer run, but realize that there are situations where this may be
unavoidable.
<%= partial "partials/provisioners/common-config" %>
## Directory Uploads
The file provisioner is also able to upload a complete directory to the remote

View File

@ -104,6 +104,8 @@ Optional Parameters:
- `user` (string) - The `--user` to use. Defaults to the user running Packer.
<%= partial "partials/provisioners/common-config" %>
## Default Extra Variables
In addition to being able to specify extra arguments using the

Some files were not shown because too many files have changed in this diff Show More