Merge remote-tracking branch 'origin/master' into ansible_local_playbook_files_update
This commit is contained in:
commit
e1a1bb522d
24
CHANGELOG.md
24
CHANGELOG.md
|
@ -4,10 +4,34 @@
|
|||
|
||||
* builder/vmware-esxi: Remove floppy files from the remote server on cleanup. [GH-6206]
|
||||
* core: When using `-on-error=[abort|ask]`, output the error to the user. [GH-6252]
|
||||
* builder/amazon: Can now force the chroot builder to mount an entire block device instead of a partition [GH-6194]
|
||||
* builder/chroot: A new template option, `nvme_device_path` has been added to provide a workaround for users who need the amazon-chroot builder to mount a NVMe volume on their instances. [GH-6295]
|
||||
* communicator/winrm: Updated dependencies to fix a race condition [GH-6261]
|
||||
* builder/hyper-v: Fix command for mounting multiple disks [GH-6267]
|
||||
* provisioner/shell: Remove file stat that was causing problems uploading files [GH-6239]
|
||||
* provisioner/puppet: Extra-Arguments are no longer prematurely interpolated.[GH-6215]
|
||||
* builder/azure: windows-sql-cloud is now in the default list of projects to check for provided images. [GH-6210]
|
||||
* builder/hyperv: Enable IP retrieval for Server 2008 R2 hosts. [GH-6219]
|
||||
* builder/hyperv: Fix bug in MAC address specification on Hyper-V. [GH-6187]
|
||||
* builder/parallels-pvm: Add missing disk compaction step. [GH-6202]
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
* builder/amazon: Amazon builders other than `chroot` now support T2 unlimited instances [GH-6265]
|
||||
* builder/azure: Updated Azure SDK to v15.0.0 [GH-6224]
|
||||
* builder/azure: Devicelogin Support for Windows [GH-6285]
|
||||
* builder/hyper-v: Hyper-V builds now connect to vnc display by default when building [GH-6243]
|
||||
* provisoner/shell-local: New options have been added to create feature parity with the shell-local post-processor. This feature now works on Windows hosts. [GH-5956]
|
||||
* post-processor/shell-local: New options have been added to create feature parity with the shell-local provisioner. This feature now works on Windows hosts. [GH-5956]
|
||||
* builder/hyper-v: New `use_fixed_vhd_format` allows vm export in an Azure-compatible format [GH-6101]
|
||||
* builder/azure: Faster deletion of Azure Resource Groups. [GH-6269]
|
||||
* builder/hyperv: New config option for specifying what secure boot template to use, allowing secure boot of linux vms. [GH-5883]
|
||||
* provisioner/chef: New config option allows user to skip cleanup of chef client staging directory. [GH-4300]
|
||||
* builder/azure: Allow device login for US government cloud. [GH-6105]
|
||||
* builder/qemu: Add support for hvf accelerator. [GH-6193]
|
||||
* builder/azure: Enable simultaneous builds within one resource group. [GH-6231]
|
||||
* builder/scaleway: Fix SSH communicator connection issue. [GH-6238]
|
||||
* core: Add opt-in Packer top-level command autocomplete [GH-5454]
|
||||
|
||||
## 1.2.3 (April 25, 2018)
|
||||
|
||||
|
|
|
@ -33,9 +33,10 @@ type Config struct {
|
|||
CommandWrapper string `mapstructure:"command_wrapper"`
|
||||
CopyFiles []string `mapstructure:"copy_files"`
|
||||
DevicePath string `mapstructure:"device_path"`
|
||||
NVMEDevicePath string `mapstructure:"nvme_device_path"`
|
||||
FromScratch bool `mapstructure:"from_scratch"`
|
||||
MountOptions []string `mapstructure:"mount_options"`
|
||||
MountPartition int `mapstructure:"mount_partition"`
|
||||
MountPartition string `mapstructure:"mount_partition"`
|
||||
MountPath string `mapstructure:"mount_path"`
|
||||
PostMountCommands []string `mapstructure:"post_mount_commands"`
|
||||
PreMountCommands []string `mapstructure:"pre_mount_commands"`
|
||||
|
@ -112,8 +113,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
|||
b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
|
||||
}
|
||||
|
||||
if b.config.MountPartition == 0 {
|
||||
b.config.MountPartition = 1
|
||||
if b.config.MountPartition == "" {
|
||||
b.config.MountPartition = "1"
|
||||
}
|
||||
|
||||
// Accumulate any errors or warnings
|
||||
|
|
|
@ -3,8 +3,8 @@ package chroot
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
sl "github.com/hashicorp/packer/common/shell-local"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/post-processor/shell-local"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
|
@ -21,7 +21,9 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx inte
|
|||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Executing command: %s", command))
|
||||
comm := &shell_local.Communicator{}
|
||||
comm := &sl.Communicator{
|
||||
ExecuteCommand: []string{command},
|
||||
}
|
||||
cmd := &packer.RemoteCmd{Command: command}
|
||||
if err := cmd.StartWithUi(comm, ui); err != nil {
|
||||
return fmt.Errorf("Error executing command: %s", err)
|
||||
|
|
|
@ -26,7 +26,7 @@ type mountPathData struct {
|
|||
// mount_device_cleanup CleanupFunc - To perform early cleanup
|
||||
type StepMountDevice struct {
|
||||
MountOptions []string
|
||||
MountPartition int
|
||||
MountPartition string
|
||||
|
||||
mountPath string
|
||||
}
|
||||
|
@ -35,6 +35,10 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi
|
|||
config := state.Get("config").(*Config)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
device := state.Get("device").(string)
|
||||
if config.NVMEDevicePath != "" {
|
||||
// customizable device path for mounting NVME block devices on c5 and m5 HVM
|
||||
device = config.NVMEDevicePath
|
||||
}
|
||||
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||
|
||||
var virtualizationType string
|
||||
|
@ -47,6 +51,7 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi
|
|||
}
|
||||
|
||||
ctx := config.ctx
|
||||
|
||||
ctx.Data = &mountPathData{Device: filepath.Base(device)}
|
||||
mountPath, err := interpolate.Render(config.MountPath, &ctx)
|
||||
|
||||
|
@ -75,8 +80,9 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi
|
|||
}
|
||||
|
||||
deviceMount := device
|
||||
if virtualizationType == "hvm" {
|
||||
deviceMount = fmt.Sprintf("%s%d", device, s.MountPartition)
|
||||
|
||||
if virtualizationType == "hvm" && s.MountPartition != "0" {
|
||||
deviceMount = fmt.Sprintf("%s%s", device, s.MountPartition)
|
||||
}
|
||||
state.Put("deviceMount", deviceMount)
|
||||
|
||||
|
@ -97,7 +103,7 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi
|
|||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
|
||||
cmd := ShellCommand(mountCommand)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer/common/uuid"
|
||||
|
@ -30,25 +30,26 @@ func (d *AmiFilterOptions) Empty() bool {
|
|||
type RunConfig struct {
|
||||
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
|
||||
AvailabilityZone string `mapstructure:"availability_zone"`
|
||||
DisableStopInstance bool `mapstructure:"disable_stop_instance"`
|
||||
EbsOptimized bool `mapstructure:"ebs_optimized"`
|
||||
EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited"`
|
||||
IamInstanceProfile string `mapstructure:"iam_instance_profile"`
|
||||
InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"`
|
||||
InstanceType string `mapstructure:"instance_type"`
|
||||
RunTags map[string]string `mapstructure:"run_tags"`
|
||||
SecurityGroupId string `mapstructure:"security_group_id"`
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids"`
|
||||
SourceAmi string `mapstructure:"source_ami"`
|
||||
SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter"`
|
||||
SpotPrice string `mapstructure:"spot_price"`
|
||||
SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product"`
|
||||
DisableStopInstance bool `mapstructure:"disable_stop_instance"`
|
||||
SecurityGroupId string `mapstructure:"security_group_id"`
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids"`
|
||||
TemporarySGSourceCidr string `mapstructure:"temporary_security_group_source_cidr"`
|
||||
SubnetId string `mapstructure:"subnet_id"`
|
||||
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
|
||||
TemporarySGSourceCidr string `mapstructure:"temporary_security_group_source_cidr"`
|
||||
UserData string `mapstructure:"user_data"`
|
||||
UserDataFile string `mapstructure:"user_data_file"`
|
||||
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"`
|
||||
VpcId string `mapstructure:"vpc_id"`
|
||||
InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"`
|
||||
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"`
|
||||
|
||||
// Communicator settings
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
|
@ -84,32 +85,39 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
|
|||
c.SSHInterface != "public_dns" &&
|
||||
c.SSHInterface != "private_dns" &&
|
||||
c.SSHInterface != "" {
|
||||
errs = append(errs, errors.New(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface)))
|
||||
errs = append(errs, fmt.Errorf("Unknown interface type: %s", c.SSHInterface))
|
||||
}
|
||||
|
||||
if c.SSHKeyPairName != "" {
|
||||
if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" {
|
||||
errs = append(errs, errors.New("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
|
||||
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
|
||||
} else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth {
|
||||
errs = append(errs, errors.New("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
|
||||
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
|
||||
}
|
||||
}
|
||||
|
||||
if c.SourceAmi == "" && c.SourceAmiFilter.Empty() {
|
||||
errs = append(errs, errors.New("A source_ami or source_ami_filter must be specified"))
|
||||
errs = append(errs, fmt.Errorf("A source_ami or source_ami_filter must be specified"))
|
||||
}
|
||||
|
||||
if c.InstanceType == "" {
|
||||
errs = append(errs, errors.New("An instance_type must be specified"))
|
||||
errs = append(errs, fmt.Errorf("An instance_type must be specified"))
|
||||
}
|
||||
|
||||
if c.SpotPrice == "auto" {
|
||||
if c.SpotPriceAutoProduct == "" {
|
||||
errs = append(errs, errors.New(
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"spot_price_auto_product must be specified when spot_price is auto"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.SpotPriceAutoProduct != "" {
|
||||
if c.SpotPrice != "auto" {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"spot_price should be set to auto when spot_price_auto_product is specified"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.UserData != "" && c.UserDataFile != "" {
|
||||
errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
|
||||
} else if c.UserDataFile != "" {
|
||||
|
@ -141,6 +149,18 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
|
|||
errs = append(errs, fmt.Errorf("shutdown_behavior only accepts 'stop' or 'terminate' values."))
|
||||
}
|
||||
|
||||
if c.EnableT2Unlimited {
|
||||
if c.SpotPrice != "" {
|
||||
errs = append(errs, fmt.Errorf("Error: T2 Unlimited cannot be used in conjuction with Spot Instances"))
|
||||
}
|
||||
firstDotIndex := strings.Index(c.InstanceType, ".")
|
||||
if firstDotIndex == -1 {
|
||||
errs = append(errs, fmt.Errorf("Error determining main Instance Type from: %s", c.InstanceType))
|
||||
} else if c.InstanceType[0:firstDotIndex] != "t2" {
|
||||
errs = append(errs, fmt.Errorf("Error: T2 Unlimited enabled with a non-T2 Instance Type: %s", c.InstanceType))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ func TestRunConfigPrepare_InstanceType(t *testing.T) {
|
|||
c := testConfig()
|
||||
c.InstanceType = ""
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if an instance_type is not specified")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -56,14 +56,14 @@ func TestRunConfigPrepare_SourceAmi(t *testing.T) {
|
|||
c := testConfig()
|
||||
c.SourceAmi = ""
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if a source_ami (or source_ami_filter) is not specified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SourceAmiFilterBlank(t *testing.T) {
|
||||
c := testConfigFilter()
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if source_ami_filter is empty or not specified (and source_ami is not specified)")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,17 +79,58 @@ func TestRunConfigPrepare_SourceAmiFilterGood(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedGood(t *testing.T) {
|
||||
c := testConfig()
|
||||
// Must have a T2 instance type if T2 Unlimited is enabled
|
||||
c.InstanceType = "t2.micro"
|
||||
c.EnableT2Unlimited = true
|
||||
err := c.Prepare(nil)
|
||||
if len(err) > 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedBadInstanceType(t *testing.T) {
|
||||
c := testConfig()
|
||||
// T2 Unlimited cannot be used with instance types other than T2
|
||||
c.InstanceType = "m5.large"
|
||||
c.EnableT2Unlimited = true
|
||||
err := c.Prepare(nil)
|
||||
if len(err) != 1 {
|
||||
t.Fatalf("Should error if T2 Unlimited is enabled with non-T2 instance_type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedBadWithSpotInstanceRequest(t *testing.T) {
|
||||
c := testConfig()
|
||||
// T2 Unlimited cannot be used with Spot Instances
|
||||
c.InstanceType = "t2.micro"
|
||||
c.EnableT2Unlimited = true
|
||||
c.SpotPrice = "auto"
|
||||
c.SpotPriceAutoProduct = "Linux/UNIX"
|
||||
err := c.Prepare(nil)
|
||||
if len(err) != 1 {
|
||||
t.Fatalf("Should error if T2 Unlimited has been used in conjuntion with a Spot Price request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SpotAuto(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.SpotPrice = "auto"
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if spot_price_auto_product is not set and spot_price is set to auto")
|
||||
}
|
||||
|
||||
// Good - SpotPrice and SpotPriceAutoProduct are correctly set
|
||||
c.SpotPriceAutoProduct = "foo"
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
c.SpotPrice = ""
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("Should error if spot_price is not set to auto and spot_price_auto_product is set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SSHPort(t *testing.T) {
|
||||
|
@ -125,7 +166,7 @@ func TestRunConfigPrepare_UserData(t *testing.T) {
|
|||
c.UserData = "foo"
|
||||
c.UserDataFile = tf.Name()
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if user_data string and user_data_file have both been specified")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -137,7 +178,7 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) {
|
|||
|
||||
c.UserDataFile = "idontexistidontthink"
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatalf("Should error if the file specified by user_data_file does not exist")
|
||||
}
|
||||
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
|
|
|
@ -24,6 +24,7 @@ type StepRunSourceInstance struct {
|
|||
Ctx interpolate.Context
|
||||
Debug bool
|
||||
EbsOptimized bool
|
||||
EnableT2Unlimited bool
|
||||
ExpectedRootDevice string
|
||||
IamInstanceProfile string
|
||||
InstanceInitiatedShutdownBehavior string
|
||||
|
@ -116,6 +117,11 @@ func (s *StepRunSourceInstance) Run(ctx context.Context, state multistep.StateBa
|
|||
EbsOptimized: &s.EbsOptimized,
|
||||
}
|
||||
|
||||
if s.EnableT2Unlimited {
|
||||
creditOption := "unlimited"
|
||||
runOpts.CreditSpecification = &ec2.CreditSpecificationRequest{CpuCredits: &creditOption}
|
||||
}
|
||||
|
||||
// Collect tags for tagging on resource creation
|
||||
var tagSpecs []*ec2.TagSpecification
|
||||
|
||||
|
|
|
@ -148,6 +148,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
Ctx: b.config.ctx,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
EnableT2Unlimited: b.config.EnableT2Unlimited,
|
||||
ExpectedRootDevice: "ebs",
|
||||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
|
||||
|
|
|
@ -162,6 +162,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
Ctx: b.config.ctx,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
EnableT2Unlimited: b.config.EnableT2Unlimited,
|
||||
ExpectedRootDevice: "ebs",
|
||||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
|
||||
|
|
|
@ -145,6 +145,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
Ctx: b.config.ctx,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
EnableT2Unlimited: b.config.EnableT2Unlimited,
|
||||
ExpectedRootDevice: "ebs",
|
||||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
|
||||
|
|
|
@ -230,6 +230,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
Ctx: b.config.ctx,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
EnableT2Unlimited: b.config.EnableT2Unlimited,
|
||||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
InstanceType: b.config.InstanceType,
|
||||
IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(),
|
||||
|
|
|
@ -10,11 +10,11 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
packerAzureCommon "github.com/hashicorp/packer/builder/azure/common"
|
||||
|
||||
armstorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
packerAzureCommon "github.com/hashicorp/packer/builder/azure/common"
|
||||
"github.com/hashicorp/packer/builder/azure/common/constants"
|
||||
"github.com/hashicorp/packer/builder/azure/common/lin"
|
||||
packerCommon "github.com/hashicorp/packer/common"
|
||||
|
@ -52,6 +52,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
|||
}
|
||||
|
||||
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
|
||||
|
||||
ui.Say("Running builder ...")
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -90,6 +91,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
if err := resolver.Resolve(b.config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.config.ObjectID == "" {
|
||||
b.config.ObjectID = getObjectIdFromToken(ui, spnCloud)
|
||||
} else {
|
||||
ui.Message("You have provided Object_ID which is no longer needed, azure packer builder determines this dynamically from the authentication token")
|
||||
}
|
||||
|
||||
if b.config.ObjectID == "" && b.config.OSType != constants.Target_Linux {
|
||||
return nil, fmt.Errorf("could not determine the ObjectID for the user, which is required for Windows builds")
|
||||
}
|
||||
|
||||
if b.config.isManagedImage() {
|
||||
group, err := azureClient.GroupsClient.Get(ctx, b.config.ManagedImageResourceGroupName)
|
||||
|
@ -347,6 +357,7 @@ func (b *Builder) configureStateBag(stateBag multistep.StateBag) {
|
|||
stateBag.Put(constants.ArmIsManagedImage, b.config.isManagedImage())
|
||||
stateBag.Put(constants.ArmManagedImageResourceGroupName, b.config.ManagedImageResourceGroupName)
|
||||
stateBag.Put(constants.ArmManagedImageName, b.config.ManagedImageName)
|
||||
stateBag.Put(constants.ArmAsyncResourceGroupDelete, b.config.AsyncResourceGroupDelete)
|
||||
}
|
||||
|
||||
// Parameters that are only known at runtime after querying Azure.
|
||||
|
@ -370,10 +381,17 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin
|
|||
var err error
|
||||
|
||||
if b.config.useDeviceLogin {
|
||||
servicePrincipalToken, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say)
|
||||
say("Getting auth token for Service management endpoint")
|
||||
servicePrincipalToken, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, b.config.cloudEnvironment.ServiceManagementEndpoint)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
say("Getting token for Vault resource")
|
||||
servicePrincipalTokenVault, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/"))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
auth := NewAuthenticate(*b.config.cloudEnvironment, b.config.ClientID, b.config.ClientSecret, b.config.TenantID)
|
||||
|
||||
|
@ -384,11 +402,39 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin
|
|||
|
||||
servicePrincipalTokenVault, err = auth.getServicePrincipalTokenWithResource(
|
||||
strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/"))
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
err = servicePrincipalToken.EnsureFresh()
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
err = servicePrincipalTokenVault.EnsureFresh()
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return servicePrincipalToken, servicePrincipalTokenVault, nil
|
||||
}
|
||||
|
||||
func getObjectIdFromToken(ui packer.Ui, token *adal.ServicePrincipalToken) string {
|
||||
claims := jwt.MapClaims{}
|
||||
var p jwt.Parser
|
||||
|
||||
var err error
|
||||
|
||||
_, _, err = p.ParseUnverified(token.OAuthToken(), claims)
|
||||
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Failed to parse the token,Error: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return claims["oid"].(string)
|
||||
|
||||
}
|
||||
|
|
|
@ -10,9 +10,9 @@ package arm
|
|||
// * ARM_STORAGE_ACCOUNT
|
||||
//
|
||||
// The subscription in question should have a resource group
|
||||
// called "packer-acceptance-test" in "West US" region. The
|
||||
// called "packer-acceptance-test" in "South Central US" region. The
|
||||
// storage account refered to in the above variable should
|
||||
// be inside this resource group and in "West US" as well.
|
||||
// be inside this resource group and in "South Central US" as well.
|
||||
//
|
||||
// In addition, the PACKER_ACC variable should also be set to
|
||||
// a non-empty value to enable Packer acceptance tests and the
|
||||
|
@ -23,9 +23,13 @@ package arm
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
builderT "github.com/hashicorp/packer/helper/builder/testing"
|
||||
"os"
|
||||
)
|
||||
|
||||
const DeviceLoginAcceptanceTest = "DEVICELOGIN_TEST"
|
||||
|
||||
func TestBuilderAcc_ManagedDisk_Windows(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
|
@ -34,6 +38,20 @@ func TestBuilderAcc_ManagedDisk_Windows(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_ManagedDisk_Windows_DeviceLogin(t *testing.T) {
|
||||
if os.Getenv(DeviceLoginAcceptanceTest) == "" {
|
||||
t.Skip(fmt.Sprintf(
|
||||
"Device Login Acceptance tests skipped unless env '%s' set, as its requires manual step during execution",
|
||||
DeviceLoginAcceptanceTest))
|
||||
return
|
||||
}
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccManagedDiskWindowsDeviceLogin,
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_ManagedDisk_Linux(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
|
@ -42,6 +60,20 @@ func TestBuilderAcc_ManagedDisk_Linux(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_ManagedDisk_Linux_DeviceLogin(t *testing.T) {
|
||||
if os.Getenv(DeviceLoginAcceptanceTest) == "" {
|
||||
t.Skip(fmt.Sprintf(
|
||||
"Device Login Acceptance tests skipped unless env '%s' set, as its requires manual step during execution",
|
||||
DeviceLoginAcceptanceTest))
|
||||
return
|
||||
}
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccManagedDiskLinuxDeviceLogin,
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_Blob_Windows(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
|
@ -65,8 +97,7 @@ const testBuilderAccManagedDiskWindows = `
|
|||
"variables": {
|
||||
"client_id": "{{env ` + "`ARM_CLIENT_ID`" + `}}",
|
||||
"client_secret": "{{env ` + "`ARM_CLIENT_SECRET`" + `}}",
|
||||
"subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}",
|
||||
"object_id": "{{env ` + "`ARM_OBJECT_ID`" + `}}"
|
||||
"subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}"
|
||||
},
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
|
@ -74,7 +105,6 @@ const testBuilderAccManagedDiskWindows = `
|
|||
"client_id": "{{user ` + "`client_id`" + `}}",
|
||||
"client_secret": "{{user ` + "`client_secret`" + `}}",
|
||||
"subscription_id": "{{user ` + "`subscription_id`" + `}}",
|
||||
"object_id": "{{user ` + "`object_id`" + `}}",
|
||||
|
||||
"managed_image_resource_group_name": "packer-acceptance-test",
|
||||
"managed_image_name": "testBuilderAccManagedDiskWindows-{{timestamp}}",
|
||||
|
@ -89,8 +119,39 @@ const testBuilderAccManagedDiskWindows = `
|
|||
"winrm_insecure": "true",
|
||||
"winrm_timeout": "3m",
|
||||
"winrm_username": "packer",
|
||||
"async_resourcegroup_delete": "true",
|
||||
|
||||
"location": "West US",
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccManagedDiskWindowsDeviceLogin = `
|
||||
{
|
||||
"variables": {
|
||||
"subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}"
|
||||
},
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
|
||||
"subscription_id": "{{user ` + "`subscription_id`" + `}}",
|
||||
|
||||
"managed_image_resource_group_name": "packer-acceptance-test",
|
||||
"managed_image_name": "testBuilderAccManagedDiskWindowsDeviceLogin-{{timestamp}}",
|
||||
|
||||
"os_type": "Windows",
|
||||
"image_publisher": "MicrosoftWindowsServer",
|
||||
"image_offer": "WindowsServer",
|
||||
"image_sku": "2012-R2-Datacenter",
|
||||
|
||||
"communicator": "winrm",
|
||||
"winrm_use_ssl": "true",
|
||||
"winrm_insecure": "true",
|
||||
"winrm_timeout": "3m",
|
||||
"winrm_username": "packer",
|
||||
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
|
@ -118,7 +179,31 @@ const testBuilderAccManagedDiskLinux = `
|
|||
"image_offer": "UbuntuServer",
|
||||
"image_sku": "16.04-LTS",
|
||||
|
||||
"location": "West US",
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
`
|
||||
const testBuilderAccManagedDiskLinuxDeviceLogin = `
|
||||
{
|
||||
"variables": {
|
||||
"subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}"
|
||||
},
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
|
||||
"subscription_id": "{{user ` + "`subscription_id`" + `}}",
|
||||
|
||||
"managed_image_resource_group_name": "packer-acceptance-test",
|
||||
"managed_image_name": "testBuilderAccManagedDiskLinuxDeviceLogin-{{timestamp}}",
|
||||
|
||||
"os_type": "Linux",
|
||||
"image_publisher": "Canonical",
|
||||
"image_offer": "UbuntuServer",
|
||||
"image_sku": "16.04-LTS",
|
||||
"async_resourcegroup_delete": "true",
|
||||
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
|
@ -157,7 +242,7 @@ const testBuilderAccBlobWindows = `
|
|||
"winrm_timeout": "3m",
|
||||
"winrm_username": "packer",
|
||||
|
||||
"location": "West US",
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
|
@ -188,7 +273,7 @@ const testBuilderAccBlobLinux = `
|
|||
"image_offer": "UbuntuServer",
|
||||
"image_sku": "16.04-LTS",
|
||||
|
||||
"location": "West US",
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}]
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ func TestStateBagShouldBePopulatedExpectedValues(t *testing.T) {
|
|||
constants.ArmStorageAccountName,
|
||||
constants.ArmVirtualMachineCaptureParameters,
|
||||
constants.ArmPublicIPAddressName,
|
||||
constants.ArmAsyncResourceGroupDelete,
|
||||
}
|
||||
|
||||
for _, v := range expectedStateBagKeys {
|
||||
|
|
|
@ -151,6 +151,9 @@ type Config struct {
|
|||
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
ctx *interpolate.Context
|
||||
|
||||
//Cleanup
|
||||
AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete"`
|
||||
}
|
||||
|
||||
type keyVaultCertificate struct {
|
||||
|
@ -490,9 +493,6 @@ func assertRequiredParametersSet(c *Config, errs *packer.MultiError) {
|
|||
// readable by the ObjectID of the App. There may be another way to handle
|
||||
// this case, but I am not currently aware of it - send feedback.
|
||||
isUseDeviceLogin := func(c *Config) bool {
|
||||
if c.OSType == constants.Target_Windows {
|
||||
return false
|
||||
}
|
||||
|
||||
return c.SubscriptionID != "" &&
|
||||
c.ClientID == "" &&
|
||||
|
|
|
@ -2,13 +2,11 @@ package arm
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
|
||||
"github.com/hashicorp/packer/builder/azure/common/constants"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
)
|
||||
|
||||
// List of configuration parameters that are required by the ARM builder.
|
||||
|
@ -448,39 +446,6 @@ func TestUserDeviceLoginIsEnabledForLinux(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUseDeviceLoginIsDisabledForWindows(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"capture_name_prefix": "ignore",
|
||||
"capture_container_name": "ignore",
|
||||
"image_offer": "ignore",
|
||||
"image_publisher": "ignore",
|
||||
"image_sku": "ignore",
|
||||
"location": "ignore",
|
||||
"storage_account": "ignore",
|
||||
"resource_group_name": "ignore",
|
||||
"subscription_id": "ignore",
|
||||
"os_type": constants.Target_Windows,
|
||||
"communicator": "none",
|
||||
}
|
||||
|
||||
_, _, err := newConfig(config, getPackerConfiguration())
|
||||
if err == nil {
|
||||
t.Fatal("Expected test to fail, but it succeeded")
|
||||
}
|
||||
|
||||
multiError, _ := err.(*packer.MultiError)
|
||||
if len(multiError.Errors) != 2 {
|
||||
t.Errorf("Expected to find 2 errors, but found %d errors", len(multiError.Errors))
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "client_id must be specified") {
|
||||
t.Error("Expected to find error for 'client_id must be specified")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "client_secret must be specified") {
|
||||
t.Error("Expected to find error for 'client_secret must be specified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigShouldRejectMalformedCaptureNamePrefix(t *testing.T) {
|
||||
config := map[string]string{
|
||||
"capture_container_name": "ignore",
|
||||
|
@ -1264,6 +1229,73 @@ func TestConfigShouldAllowTempNameOverrides(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConfigShouldAllowAsyncResourceGroupOverride(t *testing.T) {
|
||||
config := map[string]interface{}{
|
||||
"image_offer": "ignore",
|
||||
"image_publisher": "ignore",
|
||||
"image_sku": "ignore",
|
||||
"location": "ignore",
|
||||
"subscription_id": "ignore",
|
||||
"communicator": "none",
|
||||
"os_type": "linux",
|
||||
"managed_image_name": "ignore",
|
||||
"managed_image_resource_group_name": "ignore",
|
||||
"async_resourcegroup_delete": "true",
|
||||
}
|
||||
|
||||
c, _, err := newConfig(config, getPackerConfiguration())
|
||||
if err != nil {
|
||||
t.Errorf("newConfig failed with %q", err)
|
||||
}
|
||||
|
||||
if c.AsyncResourceGroupDelete != true {
|
||||
t.Errorf("expected async_resourcegroup_delete to be %q, but got %t", "async_resourcegroup_delete", c.AsyncResourceGroupDelete)
|
||||
}
|
||||
}
|
||||
func TestConfigShouldAllowAsyncResourceGroupOverrideNoValue(t *testing.T) {
|
||||
config := map[string]interface{}{
|
||||
"image_offer": "ignore",
|
||||
"image_publisher": "ignore",
|
||||
"image_sku": "ignore",
|
||||
"location": "ignore",
|
||||
"subscription_id": "ignore",
|
||||
"communicator": "none",
|
||||
"os_type": "linux",
|
||||
"managed_image_name": "ignore",
|
||||
"managed_image_resource_group_name": "ignore",
|
||||
}
|
||||
|
||||
c, _, err := newConfig(config, getPackerConfiguration())
|
||||
if err != nil {
|
||||
t.Errorf("newConfig failed with %q", err)
|
||||
}
|
||||
|
||||
if c.AsyncResourceGroupDelete != false {
|
||||
t.Errorf("expected async_resourcegroup_delete to be %q, but got %t", "async_resourcegroup_delete", c.AsyncResourceGroupDelete)
|
||||
}
|
||||
}
|
||||
func TestConfigShouldAllowAsyncResourceGroupOverrideBadValue(t *testing.T) {
|
||||
config := map[string]interface{}{
|
||||
"image_offer": "ignore",
|
||||
"image_publisher": "ignore",
|
||||
"image_sku": "ignore",
|
||||
"location": "ignore",
|
||||
"subscription_id": "ignore",
|
||||
"communicator": "none",
|
||||
"os_type": "linux",
|
||||
"managed_image_name": "ignore",
|
||||
"managed_image_resource_group_name": "ignore",
|
||||
"async_resourcegroup_delete": "asdasda",
|
||||
}
|
||||
|
||||
c, _, err := newConfig(config, getPackerConfiguration())
|
||||
if err != nil && c == nil {
|
||||
t.Log("newConfig failed which is expected ", err)
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func getArmBuilderConfiguration() map[string]string {
|
||||
m := make(map[string]string)
|
||||
for _, v := range requiredConfigValues {
|
||||
|
|
|
@ -118,14 +118,20 @@ func (s *StepCreateResourceGroup) Cleanup(state multistep.StateBag) {
|
|||
ctx := context.TODO()
|
||||
f, err := s.client.GroupsClient.Delete(ctx, resourceGroupName)
|
||||
if err == nil {
|
||||
err = f.WaitForCompletion(ctx, s.client.GroupsClient.Client)
|
||||
if state.Get(constants.ArmAsyncResourceGroupDelete).(bool) {
|
||||
s.say(fmt.Sprintf("\n Not waiting for Resource Group delete as requested by user. Resource Group Name is %s", resourceGroupName))
|
||||
} else {
|
||||
err = f.WaitForCompletion(ctx, s.client.GroupsClient.Client)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error deleting resource group. Please delete it manually.\n\n"+
|
||||
"Name: %s\n"+
|
||||
"Error: %s", resourceGroupName, err))
|
||||
return
|
||||
}
|
||||
if !state.Get(constants.ArmAsyncResourceGroupDelete).(bool) {
|
||||
ui.Say("Resource group has been deleted.")
|
||||
}
|
||||
|
||||
ui.Say("Resource group has been deleted.")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,7 +53,13 @@ func (s *StepDeleteResourceGroup) deleteResourceGroup(ctx context.Context, state
|
|||
s.say("\nThe resource group was created by Packer, deleting ...")
|
||||
f, err := s.client.GroupsClient.Delete(ctx, resourceGroupName)
|
||||
if err == nil {
|
||||
f.WaitForCompletion(ctx, s.client.GroupsClient.Client)
|
||||
if state.Get(constants.ArmAsyncResourceGroupDelete).(bool) {
|
||||
// No need to wait for the complition for delete if request is Accepted
|
||||
s.say(fmt.Sprintf("\nResource Group is being deleted, not waiting for deletion due to config. Resource Group Name '%s'", resourceGroupName))
|
||||
} else {
|
||||
f.WaitForCompletion(ctx, s.client.GroupsClient.Client)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
|
@ -35,4 +35,5 @@ const (
|
|||
ArmManagedImageResourceGroupName string = "arm.ManagedImageResourceGroupName"
|
||||
ArmManagedImageLocation string = "arm.ManagedImageLocation"
|
||||
ArmManagedImageName string = "arm.ManagedImageName"
|
||||
ArmAsyncResourceGroupDelete string = "arm.AsyncResourceGroupDelete"
|
||||
)
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
|
@ -40,8 +41,10 @@ var (
|
|||
|
||||
// Authenticate fetches a token from the local file cache or initiates a consent
|
||||
// flow and waits for token to be obtained.
|
||||
func Authenticate(env azure.Environment, tenantID string, say func(string)) (*adal.ServicePrincipalToken, error) {
|
||||
func Authenticate(env azure.Environment, tenantID string, say func(string), scope string) (*adal.ServicePrincipalToken, error) {
|
||||
clientID, ok := clientIDs[env.Name]
|
||||
var resourceid string
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("packer-azure application not set up for Azure environment %q", env.Name)
|
||||
}
|
||||
|
@ -53,9 +56,14 @@ func Authenticate(env azure.Environment, tenantID string, say func(string)) (*ad
|
|||
|
||||
// for AzurePublicCloud (https://management.core.windows.net/), this old
|
||||
// Service Management scope covers both ASM and ARM.
|
||||
apiScope := env.ServiceManagementEndpoint
|
||||
|
||||
tokenPath := tokenCachePath(tenantID)
|
||||
if strings.Contains(scope, "vault") {
|
||||
resourceid = "vault"
|
||||
} else {
|
||||
resourceid = "mgmt"
|
||||
}
|
||||
|
||||
tokenPath := tokenCachePath(tenantID + resourceid)
|
||||
saveToken := mkTokenCallback(tokenPath)
|
||||
saveTokenCallback := func(t adal.Token) error {
|
||||
say("Azure token expired. Saving the refreshed token...")
|
||||
|
@ -63,41 +71,18 @@ func Authenticate(env azure.Environment, tenantID string, say func(string)) (*ad
|
|||
}
|
||||
|
||||
// Lookup the token cache file for an existing token.
|
||||
spt, err := tokenFromFile(say, *oauthCfg, tokenPath, clientID, apiScope, saveTokenCallback)
|
||||
spt, err := tokenFromFile(say, *oauthCfg, tokenPath, clientID, scope, saveTokenCallback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if spt != nil {
|
||||
say(fmt.Sprintf("Auth token found in file: %s", tokenPath))
|
||||
|
||||
// NOTE(ahmetalpbalkan): The token file we found may contain an
|
||||
// expired access_token. In that case, the first call to Azure SDK will
|
||||
// attempt to refresh the token using refresh_token, which might have
|
||||
// expired[1], in that case we will get an error and we shall remove the
|
||||
// token file and initiate token flow again so that the user would not
|
||||
// need removing the token cache file manually.
|
||||
//
|
||||
// [1]: expiration date of refresh_token is not returned in AAD /token
|
||||
// response, we just know it is 14 days. Therefore user’s token
|
||||
// will go stale every 14 days and we will delete the token file,
|
||||
// re-initiate the device flow.
|
||||
say("Validating the token.")
|
||||
if err = validateToken(env, spt); err != nil {
|
||||
say(fmt.Sprintf("Error: %v", err))
|
||||
say("Stored Azure credentials expired. Please reauthenticate.")
|
||||
say(fmt.Sprintf("Deleting %s", tokenPath))
|
||||
if err := os.RemoveAll(tokenPath); err != nil {
|
||||
return nil, fmt.Errorf("Error deleting stale token file: %v", err)
|
||||
}
|
||||
} else {
|
||||
say("Token works.")
|
||||
return spt, nil
|
||||
}
|
||||
return spt, nil
|
||||
}
|
||||
|
||||
// Start an OAuth 2.0 device flow
|
||||
say(fmt.Sprintf("Initiating device flow: %s", tokenPath))
|
||||
spt, err = tokenFromDeviceFlow(say, *oauthCfg, clientID, apiScope)
|
||||
spt, err = tokenFromDeviceFlow(say, *oauthCfg, clientID, scope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -183,20 +168,6 @@ func mkTokenCallback(path string) adal.TokenRefreshCallback {
|
|||
}
|
||||
}
|
||||
|
||||
// validateToken makes a call to Azure SDK with given token, essentially making
|
||||
// sure if the access_token valid, if not it uses SDK’s functionality to
|
||||
// automatically refresh the token using refresh_token (which might have
|
||||
// expired). This check is essentially to make sure refresh_token is good.
|
||||
func validateToken(env azure.Environment, token *adal.ServicePrincipalToken) error {
|
||||
c := subscriptions.NewClientWithBaseURI(env.ResourceManagerEndpoint)
|
||||
c.Authorizer = autorest.NewBearerAuthorizer(token)
|
||||
_, err := c.List(context.TODO())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Token validity check failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindTenantID figures out the AAD tenant ID of the subscription by making an
|
||||
// unauthenticated request to the Get Subscription Details endpoint and parses
|
||||
// the value from WWW-Authenticate header.
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// A driver is able to talk to HyperV and perform certain
|
||||
// operations with it. Some of the operations on here may seem overly
|
||||
// specific, but they were built specifically in mind to handle features
|
||||
|
@ -109,4 +113,10 @@ type Driver interface {
|
|||
MountFloppyDrive(string, string) error
|
||||
|
||||
UnmountFloppyDrive(string) error
|
||||
|
||||
// Connect connects to a VM specified by the name given.
|
||||
Connect(string) (context.CancelFunc, error)
|
||||
|
||||
// Disconnect disconnects to a VM specified by the context cancel function.
|
||||
Disconnect(context.CancelFunc)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type DriverMock struct {
|
||||
IsRunning_Called bool
|
||||
IsRunning_VmName string
|
||||
|
@ -240,6 +244,14 @@ type DriverMock struct {
|
|||
UnmountFloppyDrive_Called bool
|
||||
UnmountFloppyDrive_VmName string
|
||||
UnmountFloppyDrive_Err error
|
||||
|
||||
Connect_Called bool
|
||||
Connect_VmName string
|
||||
Connect_Cancel context.CancelFunc
|
||||
Connect_Err error
|
||||
|
||||
Disconnect_Called bool
|
||||
Disconnect_Cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (d *DriverMock) IsRunning(vmName string) (bool, error) {
|
||||
|
@ -553,3 +565,14 @@ func (d *DriverMock) UnmountFloppyDrive(vmName string) error {
|
|||
d.UnmountFloppyDrive_VmName = vmName
|
||||
return d.UnmountFloppyDrive_Err
|
||||
}
|
||||
|
||||
func (d *DriverMock) Connect(vmName string) (context.CancelFunc, error) {
|
||||
d.Connect_Called = true
|
||||
d.Connect_VmName = vmName
|
||||
return d.Connect_Cancel, d.Connect_Err
|
||||
}
|
||||
|
||||
func (d *DriverMock) Disconnect(cancel context.CancelFunc) {
|
||||
d.Disconnect_Called = true
|
||||
d.Disconnect_Cancel = cancel
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"runtime"
|
||||
|
@ -347,3 +348,14 @@ func (d *HypervPS4Driver) verifyHypervPermissions() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect connects to a VM specified by the name given.
|
||||
func (d *HypervPS4Driver) Connect(vmName string) (context.CancelFunc, error) {
|
||||
return hyperv.ConnectVirtualMachine(vmName)
|
||||
}
|
||||
|
||||
// Disconnect disconnects to a VM specified by calling the context cancel function returned
|
||||
// from Connect.
|
||||
func (d *HypervPS4Driver) Disconnect(cancel context.CancelFunc) {
|
||||
hyperv.DisconnectVirtualMachine(cancel)
|
||||
}
|
||||
|
|
|
@ -3,13 +3,16 @@ package common
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer/helper/multistep"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
)
|
||||
|
||||
type StepRun struct {
|
||||
vmName string
|
||||
GuiCancelFunc context.CancelFunc
|
||||
Headless bool
|
||||
vmName string
|
||||
}
|
||||
|
||||
func (s *StepRun) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
|
@ -29,6 +32,13 @@ func (s *StepRun) Run(_ context.Context, state multistep.StateBag) multistep.Ste
|
|||
|
||||
s.vmName = vmName
|
||||
|
||||
if !s.Headless {
|
||||
ui.Say("Attempting to connect with vmconnect...")
|
||||
s.GuiCancelFunc, err = driver.Connect(vmName)
|
||||
if err != nil {
|
||||
log.Printf(fmt.Sprintf("Non-fatal error starting vmconnect: %s. continuing...", err))
|
||||
}
|
||||
}
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
|
@ -40,6 +50,11 @@ func (s *StepRun) Cleanup(state multistep.StateBag) {
|
|||
driver := state.Get("driver").(Driver)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
if !s.Headless && s.GuiCancelFunc != nil {
|
||||
ui.Say("Disconnecting from vmconnect...")
|
||||
s.GuiCancelFunc()
|
||||
}
|
||||
|
||||
if running, _ := driver.IsRunning(s.vmName); running {
|
||||
if err := driver.Stop(s.vmName); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error shutting down VM: %s", err))
|
||||
|
|
|
@ -115,6 +115,8 @@ type Config struct {
|
|||
// Create the VM with a Fixed VHD format disk instead of Dynamic VHDX
|
||||
FixedVHD bool `mapstructure:"use_fixed_vhd_format"`
|
||||
|
||||
Headless bool `mapstructure:"headless"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
|
@ -427,7 +429,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
SwitchVlanId: b.config.SwitchVlanId,
|
||||
},
|
||||
|
||||
&hypervcommon.StepRun{},
|
||||
&hypervcommon.StepRun{
|
||||
Headless: b.config.Headless,
|
||||
},
|
||||
|
||||
&hypervcommon.StepTypeBootCommand{
|
||||
BootCommand: b.config.FlatBootCommand(),
|
||||
|
|
|
@ -98,6 +98,8 @@ type Config struct {
|
|||
|
||||
SkipExport bool `mapstructure:"skip_export"`
|
||||
|
||||
Headless bool `mapstructure:"headless"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
|
@ -436,7 +438,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
SwitchVlanId: b.config.SwitchVlanId,
|
||||
},
|
||||
|
||||
&hypervcommon.StepRun{},
|
||||
&hypervcommon.StepRun{
|
||||
Headless: b.config.Headless,
|
||||
},
|
||||
|
||||
&hypervcommon.StepTypeBootCommand{
|
||||
BootCommand: b.config.FlatBootCommand(),
|
||||
|
|
|
@ -4,8 +4,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/packer/helper/multistep"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
|
@ -38,39 +36,10 @@ func (s StepCompactDisk) Run(_ context.Context, state multistep.StateBag) multis
|
|||
ui.Say("Compacting all attached virtual disks...")
|
||||
for i, diskFullPath := range diskFullPaths {
|
||||
ui.Message(fmt.Sprintf("Compacting virtual disk %d", i+1))
|
||||
// Get the file size of the virtual disk prior to compaction
|
||||
fi, err := os.Stat(diskFullPath)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error getting virtual disk file info pre compaction: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
diskFileSizeStart := fi.Size()
|
||||
// Defragment and compact the disk
|
||||
if err := driver.CompactDisk(diskFullPath); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error compacting disk: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
// Get the file size of the virtual disk post compaction
|
||||
fi, err = os.Stat(diskFullPath)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error getting virtual disk file info post compaction: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
diskFileSizeEnd := fi.Size()
|
||||
// Report compaction results
|
||||
log.Printf("Before compaction the disk file size was: %d", diskFileSizeStart)
|
||||
log.Printf("After compaction the disk file size was: %d", diskFileSizeEnd)
|
||||
if diskFileSizeStart > 0 {
|
||||
percentChange := ((float64(diskFileSizeEnd) / float64(diskFileSizeStart)) * 100.0) - 100.0
|
||||
switch {
|
||||
case percentChange < 0:
|
||||
ui.Message(fmt.Sprintf("Compacting reduced the disk file size by %.2f%%", math.Abs(percentChange)))
|
||||
case percentChange == 0:
|
||||
ui.Message(fmt.Sprintf("The compacting operation left the disk file size unchanged"))
|
||||
case percentChange > 0:
|
||||
ui.Message(fmt.Sprintf("WARNING: Compacting increased the disk file size by %.2f%%", percentChange))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
|
|
|
@ -2,8 +2,6 @@ package common
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer/helper/multistep"
|
||||
|
@ -17,25 +15,8 @@ func TestStepCompactDisk(t *testing.T) {
|
|||
state := testState(t)
|
||||
step := new(StepCompactDisk)
|
||||
|
||||
// Create a fake vmdk file for disk file size operations
|
||||
diskFile, err := ioutil.TempFile("", "disk.vmdk")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating fake vmdk file: %s", err)
|
||||
}
|
||||
|
||||
diskFullPath := diskFile.Name()
|
||||
defer os.Remove(diskFullPath)
|
||||
|
||||
content := []byte("I am the fake vmdk's contents")
|
||||
if _, err := diskFile.Write(content); err != nil {
|
||||
t.Fatalf("Error writing to fake vmdk file: %s", err)
|
||||
}
|
||||
if err := diskFile.Close(); err != nil {
|
||||
t.Fatalf("Error closing fake vmdk file: %s", err)
|
||||
}
|
||||
|
||||
// Set up required state
|
||||
state.Put("disk_full_paths", []string{diskFullPath})
|
||||
diskFullPaths := []string{"foo"}
|
||||
state.Put("disk_full_paths", diskFullPaths)
|
||||
|
||||
driver := state.Get("driver").(*DriverMock)
|
||||
|
||||
|
@ -51,7 +32,7 @@ func TestStepCompactDisk(t *testing.T) {
|
|||
if !driver.CompactDiskCalled {
|
||||
t.Fatal("should've called")
|
||||
}
|
||||
if driver.CompactDiskPath != diskFullPath {
|
||||
if driver.CompactDiskPath != "foo" {
|
||||
t.Fatal("should call with right path")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
package hyperv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
|
@ -900,7 +902,7 @@ Hyper-V\Get-VMNetworkAdapter -VMName $vmName | Hyper-V\Connect-VMNetworkAdapter
|
|||
func AddVirtualMachineHardDiskDrive(vmName string, vhdRoot string, vhdName string, vhdSizeBytes int64, vhdBlockSize int64, controllerType string) error {
|
||||
|
||||
var script = `
|
||||
param([string]$vmName,[string]$vhdRoot, [string]$vhdName, [string]$vhdSizeInBytes,[string]$vhdBlockSizeInByte [string]$controllerType)
|
||||
param([string]$vmName,[string]$vhdRoot, [string]$vhdName, [string]$vhdSizeInBytes, [string]$vhdBlockSizeInByte, [string]$controllerType)
|
||||
$vhdPath = Join-Path -Path $vhdRoot -ChildPath $vhdName
|
||||
Hyper-V\New-VHD -path $vhdPath -SizeBytes $vhdSizeInBytes -BlockSizeBytes $vhdBlockSizeInByte
|
||||
Hyper-V\Add-VMHardDiskDrive -VMName $vmName -path $vhdPath -controllerType $controllerType
|
||||
|
@ -1244,3 +1246,18 @@ param([string]$vmName, [string]$scanCodes)
|
|||
err := ps.Run(script, vmName, scanCodes)
|
||||
return err
|
||||
}
|
||||
|
||||
func ConnectVirtualMachine(vmName string) (context.CancelFunc, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cmd := exec.CommandContext(ctx, "vmconnect.exe", "localhost", vmName)
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
// Failed to start so cancel function not required
|
||||
cancel = nil
|
||||
}
|
||||
return cancel, err
|
||||
}
|
||||
|
||||
func DisconnectVirtualMachine(cancel context.CancelFunc) {
|
||||
cancel()
|
||||
}
|
||||
|
|
|
@ -1,36 +1,27 @@
|
|||
package shell
|
||||
package shell_local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type Communicator struct {
|
||||
ExecuteCommand []string
|
||||
Ctx interpolate.Context
|
||||
}
|
||||
|
||||
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
|
||||
// Render the template so that we know how to execute the command
|
||||
c.Ctx.Data = &ExecuteCommandTemplate{
|
||||
Command: cmd.Command,
|
||||
}
|
||||
for i, field := range c.ExecuteCommand {
|
||||
command, err := interpolate.Render(field, &c.Ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error processing command: %s", err)
|
||||
}
|
||||
|
||||
c.ExecuteCommand[i] = command
|
||||
if len(c.ExecuteCommand) == 0 {
|
||||
return fmt.Errorf("Error launching command via shell-local communicator: No ExecuteCommand provided")
|
||||
}
|
||||
|
||||
// Build the local command to execute
|
||||
log.Printf("[INFO] (shell-local communicator): Executing local shell command %s", c.ExecuteCommand)
|
||||
localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...)
|
||||
localCmd.Stdin = cmd.Stdin
|
||||
localCmd.Stdout = cmd.Stdout
|
||||
|
@ -79,7 +70,3 @@ func (c *Communicator) Download(string, io.Writer) error {
|
|||
func (c *Communicator) DownloadDir(string, string, []string) error {
|
||||
return fmt.Errorf("downloadDir not supported")
|
||||
}
|
||||
|
||||
type ExecuteCommandTemplate struct {
|
||||
Command string
|
||||
}
|
|
@ -19,12 +19,13 @@ func TestCommunicator(t *testing.T) {
|
|||
return
|
||||
}
|
||||
|
||||
c := &Communicator{}
|
||||
c := &Communicator{
|
||||
ExecuteCommand: []string{"/bin/sh", "-c", "echo foo"},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
cmd := &packer.RemoteCmd{
|
||||
Command: "/bin/echo foo",
|
||||
Stdout: &buf,
|
||||
Stdout: &buf,
|
||||
}
|
||||
|
||||
if err := c.Start(cmd); err != nil {
|
|
@ -0,0 +1,215 @@
|
|||
package shell_local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer/common"
|
||||
configHelper "github.com/hashicorp/packer/helper/config"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
|
||||
// ** DEPRECATED: USE INLINE INSTEAD **
|
||||
// ** Only Present for backwards compatibiltiy **
|
||||
// Command is the command to execute
|
||||
Command string
|
||||
|
||||
// An inline script to execute. Multiple strings are all executed
|
||||
// in the context of a single shell.
|
||||
Inline []string
|
||||
|
||||
// The shebang value used when running inline scripts.
|
||||
InlineShebang string `mapstructure:"inline_shebang"`
|
||||
|
||||
// The file extension to use for the file generated from the inline commands
|
||||
TempfileExtension string `mapstructure:"tempfile_extension"`
|
||||
|
||||
// The local path of the shell script to upload and execute.
|
||||
Script string
|
||||
|
||||
// An array of multiple scripts to run.
|
||||
Scripts []string
|
||||
|
||||
// An array of environment variables that will be injected before
|
||||
// your command(s) are executed.
|
||||
Vars []string `mapstructure:"environment_vars"`
|
||||
|
||||
EnvVarFormat string `mapstructure:"env_var_format"`
|
||||
// End dedupe with postprocessor
|
||||
|
||||
// The command used to execute the script. The '{{ .Path }}' variable
|
||||
// should be used to specify where the script goes, {{ .Vars }}
|
||||
// can be used to inject the environment_vars into the environment.
|
||||
ExecuteCommand []string `mapstructure:"execute_command"`
|
||||
|
||||
UseLinuxPathing bool `mapstructure:"use_linux_pathing"`
|
||||
|
||||
Ctx interpolate.Context
|
||||
}
|
||||
|
||||
func Decode(config *Config, raws ...interface{}) error {
|
||||
err := configHelper.Decode(&config, &configHelper.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &config.Ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"execute_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error decoding config: %s, config is %#v, and raws is %#v", err, config, raws)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Validate(config *Config) error {
|
||||
var errs *packer.MultiError
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
if len(config.ExecuteCommand) == 0 {
|
||||
config.ExecuteCommand = []string{
|
||||
"cmd",
|
||||
"/V",
|
||||
"/C",
|
||||
"{{.Vars}}",
|
||||
"call",
|
||||
"{{.Script}}",
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if config.InlineShebang == "" {
|
||||
config.InlineShebang = "/bin/sh -e"
|
||||
}
|
||||
if len(config.ExecuteCommand) == 0 {
|
||||
config.ExecuteCommand = []string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"{{.Vars}} {{.Script}}",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up input
|
||||
if config.Inline != nil && len(config.Inline) == 0 {
|
||||
config.Inline = make([]string, 0)
|
||||
}
|
||||
|
||||
if config.Scripts == nil {
|
||||
config.Scripts = make([]string, 0)
|
||||
}
|
||||
|
||||
if config.Vars == nil {
|
||||
config.Vars = make([]string, 0)
|
||||
}
|
||||
|
||||
// Verify that the user has given us a command to run
|
||||
if config.Command == "" && len(config.Inline) == 0 &&
|
||||
len(config.Scripts) == 0 && config.Script == "" {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("Command, Inline, Script and Scripts options cannot all be empty."))
|
||||
}
|
||||
|
||||
// Check that user hasn't given us too many commands to run
|
||||
tooManyOptionsErr := errors.New("You may only specify one of the " +
|
||||
"following options: Command, Inline, Script or Scripts. Please" +
|
||||
" consolidate these options in your config.")
|
||||
|
||||
if config.Command != "" {
|
||||
if len(config.Inline) != 0 || len(config.Scripts) != 0 || config.Script != "" {
|
||||
errs = packer.MultiErrorAppend(errs, tooManyOptionsErr)
|
||||
} else {
|
||||
config.Inline = []string{config.Command}
|
||||
}
|
||||
}
|
||||
|
||||
if config.Script != "" {
|
||||
if len(config.Scripts) > 0 || len(config.Inline) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs, tooManyOptionsErr)
|
||||
} else {
|
||||
config.Scripts = []string{config.Script}
|
||||
}
|
||||
}
|
||||
|
||||
if len(config.Scripts) > 0 && config.Inline != nil {
|
||||
errs = packer.MultiErrorAppend(errs, tooManyOptionsErr)
|
||||
}
|
||||
|
||||
// Check that all scripts we need to run exist locally
|
||||
for _, path := range config.Scripts {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Bad script '%s': %s", path, err))
|
||||
}
|
||||
}
|
||||
if config.UseLinuxPathing {
|
||||
for index, script := range config.Scripts {
|
||||
scriptAbsPath, err := filepath.Abs(script)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error converting %s to absolute path: %s", script, err.Error())
|
||||
}
|
||||
converted, err := ConvertToLinuxPath(scriptAbsPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Scripts[index] = converted
|
||||
}
|
||||
// Interoperability issues with WSL makes creating and running tempfiles
|
||||
// via golang's os package basically impossible.
|
||||
if len(config.Inline) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Packer is unable to use the Command and Inline "+
|
||||
"features with the Windows Linux Subsystem. Please use "+
|
||||
"the Script or Scripts options instead"))
|
||||
}
|
||||
}
|
||||
// This is currently undocumented and not a feature users are expected to
|
||||
// interact with.
|
||||
if config.EnvVarFormat == "" {
|
||||
if (runtime.GOOS == "windows") && !config.UseLinuxPathing {
|
||||
config.EnvVarFormat = "set %s=%s && "
|
||||
} else {
|
||||
config.EnvVarFormat = "%s='%s' "
|
||||
}
|
||||
}
|
||||
|
||||
// drop unnecessary "." in extension; we add this later.
|
||||
if config.TempfileExtension != "" {
|
||||
if strings.HasPrefix(config.TempfileExtension, ".") {
|
||||
config.TempfileExtension = config.TempfileExtension[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Do a check for bad environment variables, such as '=foo', 'foobar'
|
||||
for _, kv := range config.Vars {
|
||||
vs := strings.SplitN(kv, "=", 2)
|
||||
if len(vs) != 2 || vs[0] == "" {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Environment variable not in format 'key=value': %s", kv))
|
||||
}
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// C:/path/to/your/file becomes /mnt/c/path/to/your/file
|
||||
func ConvertToLinuxPath(winAbsPath string) (string, error) {
|
||||
// get absolute path of script, and morph it into the bash path
|
||||
winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1)
|
||||
splitPath := strings.SplitN(winAbsPath, ":/", 2)
|
||||
winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1])
|
||||
return winBashPath, nil
|
||||
}
|
|
@ -0,0 +1,164 @@
|
|||
package shell_local
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type ExecuteCommandTemplate struct {
|
||||
Vars string
|
||||
Script string
|
||||
Command string
|
||||
}
|
||||
|
||||
func Run(ui packer.Ui, config *Config) (bool, error) {
|
||||
scripts := make([]string, len(config.Scripts))
|
||||
if len(config.Scripts) > 0 {
|
||||
copy(scripts, config.Scripts)
|
||||
} else if config.Inline != nil {
|
||||
// If we have an inline script, then turn that into a temporary
|
||||
// shell script and use that.
|
||||
tempScriptFileName, err := createInlineScriptFile(config)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
scripts = append(scripts, tempScriptFileName)
|
||||
|
||||
// figure out what extension the file should have, and rename it.
|
||||
if config.TempfileExtension != "" {
|
||||
os.Rename(tempScriptFileName, fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension))
|
||||
tempScriptFileName = fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension)
|
||||
}
|
||||
defer os.Remove(tempScriptFileName)
|
||||
}
|
||||
|
||||
// Create environment variables to set before executing the command
|
||||
flattenedEnvVars, err := createFlattenedEnvVars(config)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
interpolatedCmds, err := createInterpolatedCommands(config, script, flattenedEnvVars)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Running local shell script: %s", script))
|
||||
|
||||
comm := &Communicator{
|
||||
ExecuteCommand: interpolatedCmds,
|
||||
}
|
||||
|
||||
// The remoteCmd generated here isn't actually run, but it allows us to
|
||||
// use the same interafce for the shell-local communicator as we use for
|
||||
// the other communicators; ultimately, this command is just used for
|
||||
// buffers and for reading the final exit status.
|
||||
flattenedCmd := strings.Join(interpolatedCmds, " ")
|
||||
cmd := &packer.RemoteCmd{Command: flattenedCmd}
|
||||
log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd)
|
||||
|
||||
if err := cmd.StartWithUi(comm, ui); err != nil {
|
||||
return false, fmt.Errorf(
|
||||
"Error executing script: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
script)
|
||||
}
|
||||
if cmd.ExitStatus != 0 {
|
||||
return false, fmt.Errorf(
|
||||
"Erroneous exit code %d while executing script: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
cmd.ExitStatus,
|
||||
script)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func createInlineScriptFile(config *Config) (string, error) {
|
||||
tf, err := ioutil.TempFile("", "packer-shell")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
defer tf.Close()
|
||||
// Write our contents to it
|
||||
writer := bufio.NewWriter(tf)
|
||||
if config.InlineShebang != "" {
|
||||
shebang := fmt.Sprintf("#!%s\n", config.InlineShebang)
|
||||
log.Printf("[INFO] (shell-local): Prepending inline script with %s", shebang)
|
||||
writer.WriteString(shebang)
|
||||
}
|
||||
for _, command := range config.Inline {
|
||||
if _, err := writer.WriteString(command + "\n"); err != nil {
|
||||
return "", fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := writer.Flush(); err != nil {
|
||||
return "", fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
|
||||
err = os.Chmod(tf.Name(), 0700)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] (shell-local): error modifying permissions of temp script file: %s", err.Error())
|
||||
}
|
||||
return tf.Name(), nil
|
||||
}
|
||||
|
||||
// Generates the final command to send to the communicator, using either the
|
||||
// user-provided ExecuteCommand or defaulting to something that makes sense for
|
||||
// the host OS
|
||||
func createInterpolatedCommands(config *Config, script string, flattenedEnvVars string) ([]string, error) {
|
||||
config.Ctx.Data = &ExecuteCommandTemplate{
|
||||
Vars: flattenedEnvVars,
|
||||
Script: script,
|
||||
Command: script,
|
||||
}
|
||||
|
||||
interpolatedCmds := make([]string, len(config.ExecuteCommand))
|
||||
for i, cmd := range config.ExecuteCommand {
|
||||
interpolatedCmd, err := interpolate.Render(cmd, &config.Ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error processing command: %s", err)
|
||||
}
|
||||
interpolatedCmds[i] = interpolatedCmd
|
||||
}
|
||||
return interpolatedCmds, nil
|
||||
}
|
||||
|
||||
func createFlattenedEnvVars(config *Config) (string, error) {
|
||||
flattened := ""
|
||||
envVars := make(map[string]string)
|
||||
|
||||
// Always available Packer provided env vars
|
||||
envVars["PACKER_BUILD_NAME"] = fmt.Sprintf("%s", config.PackerBuildName)
|
||||
envVars["PACKER_BUILDER_TYPE"] = fmt.Sprintf("%s", config.PackerBuilderType)
|
||||
|
||||
// Split vars into key/value components
|
||||
for _, envVar := range config.Vars {
|
||||
keyValue := strings.SplitN(envVar, "=", 2)
|
||||
// Store pair, replacing any single quotes in value so they parse
|
||||
// correctly with required environment variable format
|
||||
envVars[keyValue[0]] = strings.Replace(keyValue[1], "'", `'"'"'`, -1)
|
||||
}
|
||||
|
||||
// Create a list of env var keys in sorted order
|
||||
var keys []string
|
||||
for k := range envVars {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
flattened += fmt.Sprintf(config.EnvVarFormat, key, envVars[key])
|
||||
}
|
||||
return flattened, nil
|
||||
}
|
Binary file not shown.
|
@ -2,8 +2,7 @@
|
|||
"variables": {
|
||||
"client_id": "{{env `ARM_CLIENT_ID`}}",
|
||||
"client_secret": "{{env `ARM_CLIENT_SECRET`}}",
|
||||
"subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}",
|
||||
"object_id": "{{env `ARM_OBJECT_ID`}}"
|
||||
"subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}"
|
||||
},
|
||||
"builders": [{
|
||||
"type": "azure-arm",
|
||||
|
@ -11,7 +10,6 @@
|
|||
"client_id": "{{user `client_id`}}",
|
||||
"client_secret": "{{user `client_secret`}}",
|
||||
"subscription_id": "{{user `subscription_id`}}",
|
||||
"object_id": "{{user `object_id`}}",
|
||||
|
||||
"managed_image_resource_group_name": "packertest",
|
||||
"managed_image_name": "MyWindowsOSImage",
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"variables": {
|
||||
"subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}"
|
||||
},
|
||||
"builders": [{
|
||||
"type": "azure-arm",
|
||||
|
||||
"subscription_id": "{{user `subscription_id`}}",
|
||||
|
||||
"managed_image_resource_group_name": "packertest",
|
||||
"managed_image_name": "MyWindowsOSImage",
|
||||
|
||||
"os_type": "Windows",
|
||||
"image_publisher": "MicrosoftWindowsServer",
|
||||
"image_offer": "WindowsServer",
|
||||
"image_sku": "2012-R2-Datacenter",
|
||||
|
||||
"communicator": "winrm",
|
||||
"winrm_use_ssl": "true",
|
||||
"winrm_insecure": "true",
|
||||
"winrm_timeout": "3m",
|
||||
"winrm_username": "packer",
|
||||
|
||||
"location": "South Central US",
|
||||
"vm_size": "Standard_DS2_v2"
|
||||
}],
|
||||
"provisioners": [{
|
||||
"type": "powershell",
|
||||
"inline": [
|
||||
"if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}",
|
||||
"& $env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /quiet /quit",
|
||||
"while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }"
|
||||
]
|
||||
}]
|
||||
}
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
package shell_local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/packer/packer"
|
||||
)
|
||||
|
||||
type Communicator struct{}
|
||||
|
||||
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
|
||||
localCmd := exec.Command("sh", "-c", cmd.Command)
|
||||
localCmd.Stdin = cmd.Stdin
|
||||
localCmd.Stdout = cmd.Stdout
|
||||
localCmd.Stderr = cmd.Stderr
|
||||
|
||||
// Start it. If it doesn't work, then error right away.
|
||||
if err := localCmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We've started successfully. Start a goroutine to wait for
|
||||
// it to complete and track exit status.
|
||||
go func() {
|
||||
var exitStatus int
|
||||
err := localCmd.Wait()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
exitStatus = 1
|
||||
|
||||
// There is no process-independent way to get the REAL
|
||||
// exit status so we just try to go deeper.
|
||||
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
|
||||
exitStatus = status.ExitStatus()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmd.SetExited(exitStatus)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Communicator) Upload(string, io.Reader, *os.FileInfo) error {
|
||||
return fmt.Errorf("upload not supported")
|
||||
}
|
||||
|
||||
func (c *Communicator) UploadDir(string, string, []string) error {
|
||||
return fmt.Errorf("uploadDir not supported")
|
||||
}
|
||||
|
||||
func (c *Communicator) Download(string, io.Writer) error {
|
||||
return fmt.Errorf("download not supported")
|
||||
}
|
||||
|
||||
func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error {
|
||||
return fmt.Errorf("downloadDir not supported")
|
||||
}
|
|
@ -1,51 +1,12 @@
|
|||
package shell_local
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer/common"
|
||||
"github.com/hashicorp/packer/helper/config"
|
||||
sl "github.com/hashicorp/packer/common/shell-local"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
|
||||
// An inline script to execute. Multiple strings are all executed
|
||||
// in the context of a single shell.
|
||||
Inline []string
|
||||
|
||||
// The shebang value used when running inline scripts.
|
||||
InlineShebang string `mapstructure:"inline_shebang"`
|
||||
|
||||
// The local path of the shell script to upload and execute.
|
||||
Script string
|
||||
|
||||
// An array of multiple scripts to run.
|
||||
Scripts []string
|
||||
|
||||
// An array of environment variables that will be injected before
|
||||
// your command(s) are executed.
|
||||
Vars []string `mapstructure:"environment_vars"`
|
||||
|
||||
// The command used to execute the script. The '{{ .Path }}' variable
|
||||
// should be used to specify where the script goes, {{ .Vars }}
|
||||
// can be used to inject the environment_vars into the environment.
|
||||
ExecuteCommand string `mapstructure:"execute_command"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type PostProcessor struct {
|
||||
config Config
|
||||
config sl.Config
|
||||
}
|
||||
|
||||
type ExecuteCommandTemplate struct {
|
||||
|
@ -54,179 +15,34 @@ type ExecuteCommandTemplate struct {
|
|||
}
|
||||
|
||||
func (p *PostProcessor) Configure(raws ...interface{}) error {
|
||||
err := config.Decode(&p.config, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &p.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"execute_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
err := sl.Decode(&p.config, raws...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.config.ExecuteCommand == "" {
|
||||
p.config.ExecuteCommand = `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`
|
||||
if len(p.config.ExecuteCommand) == 1 {
|
||||
// Backwards compatibility -- before we merged the shell-local
|
||||
// post-processor and provisioners, the post-processor accepted
|
||||
// execute_command as a string rather than a slice of strings. It didn't
|
||||
// have a configurable call to shell program, automatically prepending
|
||||
// the user-supplied execute_command string with "sh -c". If users are
|
||||
// still using the old way of defining ExecuteCommand (by supplying a
|
||||
// single string rather than a slice of strings) then we need to
|
||||
// prepend this command with the call that the post-processor defaulted
|
||||
// to before.
|
||||
p.config.ExecuteCommand = append([]string{"sh", "-c"}, p.config.ExecuteCommand...)
|
||||
}
|
||||
|
||||
if p.config.Inline != nil && len(p.config.Inline) == 0 {
|
||||
p.config.Inline = nil
|
||||
}
|
||||
|
||||
if p.config.InlineShebang == "" {
|
||||
p.config.InlineShebang = "/bin/sh -e"
|
||||
}
|
||||
|
||||
if p.config.Scripts == nil {
|
||||
p.config.Scripts = make([]string, 0)
|
||||
}
|
||||
|
||||
if p.config.Vars == nil {
|
||||
p.config.Vars = make([]string, 0)
|
||||
}
|
||||
|
||||
var errs *packer.MultiError
|
||||
if p.config.Script != "" && len(p.config.Scripts) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("Only one of script or scripts can be specified."))
|
||||
}
|
||||
|
||||
if p.config.Script != "" {
|
||||
p.config.Scripts = []string{p.config.Script}
|
||||
}
|
||||
|
||||
if len(p.config.Scripts) == 0 && p.config.Inline == nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("Either a script file or inline script must be specified."))
|
||||
} else if len(p.config.Scripts) > 0 && p.config.Inline != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("Only a script file or an inline script can be specified, not both."))
|
||||
}
|
||||
|
||||
for _, path := range p.config.Scripts {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Bad script '%s': %s", path, err))
|
||||
}
|
||||
}
|
||||
|
||||
// Do a check for bad environment variables, such as '=foo', 'foobar'
|
||||
for _, kv := range p.config.Vars {
|
||||
vs := strings.SplitN(kv, "=", 2)
|
||||
if len(vs) != 2 || vs[0] == "" {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Environment variable not in format 'key=value': %s", kv))
|
||||
}
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
return nil
|
||||
return sl.Validate(&p.config)
|
||||
}
|
||||
|
||||
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
|
||||
// this particular post-processor doesn't do anything with the artifact
|
||||
// except to return it.
|
||||
|
||||
scripts := make([]string, len(p.config.Scripts))
|
||||
copy(scripts, p.config.Scripts)
|
||||
|
||||
// If we have an inline script, then turn that into a temporary
|
||||
// shell script and use that.
|
||||
if p.config.Inline != nil {
|
||||
tf, err := ioutil.TempFile("", "packer-shell")
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
// Set the path to the temporary file
|
||||
scripts = append(scripts, tf.Name())
|
||||
|
||||
// Write our contents to it
|
||||
writer := bufio.NewWriter(tf)
|
||||
writer.WriteString(fmt.Sprintf("#!%s\n", p.config.InlineShebang))
|
||||
for _, command := range p.config.Inline {
|
||||
if _, err := writer.WriteString(command + "\n"); err != nil {
|
||||
return nil, false, fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := writer.Flush(); err != nil {
|
||||
return nil, false, fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
|
||||
tf.Close()
|
||||
retBool, retErr := sl.Run(ui, &p.config)
|
||||
if !retBool {
|
||||
return nil, retBool, retErr
|
||||
}
|
||||
|
||||
// Create environment variables to set before executing the command
|
||||
flattenedEnvVars := p.createFlattenedEnvVars()
|
||||
|
||||
for _, script := range scripts {
|
||||
|
||||
p.config.ctx.Data = &ExecuteCommandTemplate{
|
||||
Vars: flattenedEnvVars,
|
||||
Script: script,
|
||||
}
|
||||
|
||||
command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("Error processing command: %s", err)
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script))
|
||||
|
||||
comm := &Communicator{}
|
||||
|
||||
cmd := &packer.RemoteCmd{Command: command}
|
||||
|
||||
log.Printf("starting local command: %s", command)
|
||||
if err := cmd.StartWithUi(comm, ui); err != nil {
|
||||
return nil, false, fmt.Errorf(
|
||||
"Error executing script: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
script)
|
||||
}
|
||||
if cmd.ExitStatus != 0 {
|
||||
return nil, false, fmt.Errorf(
|
||||
"Erroneous exit code %d while executing script: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
cmd.ExitStatus,
|
||||
script)
|
||||
}
|
||||
}
|
||||
|
||||
return artifact, true, nil
|
||||
}
|
||||
|
||||
func (p *PostProcessor) createFlattenedEnvVars() (flattened string) {
|
||||
flattened = ""
|
||||
envVars := make(map[string]string)
|
||||
|
||||
// Always available Packer provided env vars
|
||||
envVars["PACKER_BUILD_NAME"] = fmt.Sprintf("%s", p.config.PackerBuildName)
|
||||
envVars["PACKER_BUILDER_TYPE"] = fmt.Sprintf("%s", p.config.PackerBuilderType)
|
||||
|
||||
// Split vars into key/value components
|
||||
for _, envVar := range p.config.Vars {
|
||||
keyValue := strings.SplitN(envVar, "=", 2)
|
||||
// Store pair, replacing any single quotes in value so they parse
|
||||
// correctly with required environment variable format
|
||||
envVars[keyValue[0]] = strings.Replace(keyValue[1], "'", `'"'"'`, -1)
|
||||
}
|
||||
|
||||
// Create a list of env var keys in sorted order
|
||||
var keys []string
|
||||
for k := range envVars {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Re-assemble vars surrounding value with single quotes and flatten
|
||||
for _, key := range keys {
|
||||
flattened += fmt.Sprintf("%s='%s' ", key, envVars[key])
|
||||
}
|
||||
return
|
||||
return artifact, retBool, retErr
|
||||
}
|
||||
|
|
|
@ -3,9 +3,11 @@ package shell_local
|
|||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPostProcessor_ImplementsPostProcessor(t *testing.T) {
|
||||
|
@ -28,32 +30,35 @@ func TestPostProcessor_Impl(t *testing.T) {
|
|||
|
||||
func TestPostProcessorPrepare_Defaults(t *testing.T) {
|
||||
var p PostProcessor
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
err := p.Configure(config)
|
||||
err := p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_InlineShebang(t *testing.T) {
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
delete(config, "inline_shebang")
|
||||
delete(raws, "inline_shebang")
|
||||
p := new(PostProcessor)
|
||||
err := p.Configure(config)
|
||||
err := p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if p.config.InlineShebang != "/bin/sh -e" {
|
||||
expected := ""
|
||||
if runtime.GOOS != "windows" {
|
||||
expected = "/bin/sh -e"
|
||||
}
|
||||
if p.config.InlineShebang != expected {
|
||||
t.Fatalf("bad value: %s", p.config.InlineShebang)
|
||||
}
|
||||
|
||||
// Test with a good one
|
||||
config["inline_shebang"] = "foo"
|
||||
raws["inline_shebang"] = "foo"
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
@ -65,23 +70,23 @@ func TestPostProcessorPrepare_InlineShebang(t *testing.T) {
|
|||
|
||||
func TestPostProcessorPrepare_InvalidKey(t *testing.T) {
|
||||
var p PostProcessor
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
// Add a random key
|
||||
config["i_should_not_be_valid"] = true
|
||||
err := p.Configure(config)
|
||||
raws["i_should_not_be_valid"] = true
|
||||
err := p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_Script(t *testing.T) {
|
||||
config := testConfig()
|
||||
delete(config, "inline")
|
||||
raws := testConfig()
|
||||
delete(raws, "inline")
|
||||
|
||||
config["script"] = "/this/should/not/exist"
|
||||
raws["script"] = "/this/should/not/exist"
|
||||
p := new(PostProcessor)
|
||||
err := p.Configure(config)
|
||||
err := p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
@ -93,23 +98,65 @@ func TestPostProcessorPrepare_Script(t *testing.T) {
|
|||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
config["script"] = tf.Name()
|
||||
raws["script"] = tf.Name()
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) {
|
||||
// Check that passing a string will work (Backwards Compatibility)
|
||||
p := new(PostProcessor)
|
||||
raws := testConfig()
|
||||
raws["execute_command"] = "foo bar"
|
||||
err := p.Configure(raws)
|
||||
expected := []string{"sh", "-c", "foo bar"}
|
||||
if err != nil {
|
||||
t.Fatalf("should handle backwards compatibility: %s", err)
|
||||
}
|
||||
assert.Equal(t, p.config.ExecuteCommand, expected,
|
||||
"Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand)
|
||||
|
||||
// Check that passing a list will work
|
||||
p = new(PostProcessor)
|
||||
raws = testConfig()
|
||||
raws["execute_command"] = []string{"foo", "bar"}
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should handle backwards compatibility: %s", err)
|
||||
}
|
||||
expected = []string{"foo", "bar"}
|
||||
assert.Equal(t, p.config.ExecuteCommand, expected,
|
||||
"Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand)
|
||||
|
||||
// Check that default is as expected
|
||||
raws = testConfig()
|
||||
delete(raws, "execute_command")
|
||||
p = new(PostProcessor)
|
||||
p.Configure(raws)
|
||||
if runtime.GOOS != "windows" {
|
||||
expected = []string{"/bin/sh", "-c", "{{.Vars}} {{.Script}}"}
|
||||
} else {
|
||||
expected = []string{"cmd", "/V", "/C", "{{.Vars}}", "call", "{{.Script}}"}
|
||||
}
|
||||
assert.Equal(t, p.config.ExecuteCommand, expected,
|
||||
"Did not get expected default: expected: %#v; received %#v", expected, p.config.ExecuteCommand)
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) {
|
||||
var p PostProcessor
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
delete(config, "inline")
|
||||
delete(config, "script")
|
||||
err := p.Configure(config)
|
||||
// Error if no scripts/inline commands provided
|
||||
delete(raws, "inline")
|
||||
delete(raws, "script")
|
||||
delete(raws, "command")
|
||||
delete(raws, "scripts")
|
||||
err := p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
t.Fatalf("should error when no scripts/inline commands are provided")
|
||||
}
|
||||
|
||||
// Test with both
|
||||
|
@ -119,9 +166,9 @@ func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) {
|
|||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
config["inline"] = []interface{}{"foo"}
|
||||
config["script"] = tf.Name()
|
||||
err = p.Configure(config)
|
||||
raws["inline"] = []interface{}{"foo"}
|
||||
raws["script"] = tf.Name()
|
||||
err = p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
@ -129,7 +176,7 @@ func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) {
|
|||
|
||||
func TestPostProcessorPrepare_ScriptAndScripts(t *testing.T) {
|
||||
var p PostProcessor
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
// Test with both
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
|
@ -138,21 +185,21 @@ func TestPostProcessorPrepare_ScriptAndScripts(t *testing.T) {
|
|||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
config["inline"] = []interface{}{"foo"}
|
||||
config["scripts"] = []string{tf.Name()}
|
||||
err = p.Configure(config)
|
||||
raws["inline"] = []interface{}{"foo"}
|
||||
raws["scripts"] = []string{tf.Name()}
|
||||
err = p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_Scripts(t *testing.T) {
|
||||
config := testConfig()
|
||||
delete(config, "inline")
|
||||
raws := testConfig()
|
||||
delete(raws, "inline")
|
||||
|
||||
config["scripts"] = []string{}
|
||||
raws["scripts"] = []string{}
|
||||
p := new(PostProcessor)
|
||||
err := p.Configure(config)
|
||||
err := p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
@ -164,92 +211,55 @@ func TestPostProcessorPrepare_Scripts(t *testing.T) {
|
|||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
config["scripts"] = []string{tf.Name()}
|
||||
raws["scripts"] = []string{tf.Name()}
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessorPrepare_EnvironmentVars(t *testing.T) {
|
||||
config := testConfig()
|
||||
raws := testConfig()
|
||||
|
||||
// Test with a bad case
|
||||
config["environment_vars"] = []string{"badvar", "good=var"}
|
||||
raws["environment_vars"] = []string{"badvar", "good=var"}
|
||||
p := new(PostProcessor)
|
||||
err := p.Configure(config)
|
||||
err := p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// Test with a trickier case
|
||||
config["environment_vars"] = []string{"=bad"}
|
||||
raws["environment_vars"] = []string{"=bad"}
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// Test with a good case
|
||||
// Note: baz= is a real env variable, just empty
|
||||
config["environment_vars"] = []string{"FOO=bar", "baz="}
|
||||
raws["environment_vars"] = []string{"FOO=bar", "baz="}
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test when the env variable value contains an equals sign
|
||||
config["environment_vars"] = []string{"good=withequals=true"}
|
||||
raws["environment_vars"] = []string{"good=withequals=true"}
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test when the env variable value starts with an equals sign
|
||||
config["environment_vars"] = []string{"good==true"}
|
||||
raws["environment_vars"] = []string{"good==true"}
|
||||
p = new(PostProcessor)
|
||||
err = p.Configure(config)
|
||||
err = p.Configure(raws)
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessor_createFlattenedEnvVars(t *testing.T) {
|
||||
var flattenedEnvVars string
|
||||
config := testConfig()
|
||||
|
||||
userEnvVarTests := [][]string{
|
||||
{}, // No user env var
|
||||
{"FOO=bar"}, // Single user env var
|
||||
{"FOO=bar's"}, // User env var with single quote in value
|
||||
{"FOO=bar", "BAZ=qux"}, // Multiple user env vars
|
||||
{"FOO=bar=baz"}, // User env var with value containing equals
|
||||
{"FOO==bar"}, // User env var with value starting with equals
|
||||
}
|
||||
expected := []string{
|
||||
`PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
`FOO='bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
`FOO='bar'"'"'s' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
`BAZ='qux' FOO='bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
`FOO='bar=baz' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
`FOO='=bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `,
|
||||
}
|
||||
|
||||
p := new(PostProcessor)
|
||||
p.Configure(config)
|
||||
|
||||
// Defaults provided by Packer
|
||||
p.config.PackerBuildName = "vmware"
|
||||
p.config.PackerBuilderType = "iso"
|
||||
|
||||
for i, expectedValue := range expected {
|
||||
p.config.Vars = userEnvVarTests[i]
|
||||
flattenedEnvVars = p.createFlattenedEnvVars()
|
||||
if flattenedEnvVars != expectedValue {
|
||||
t.Fatalf("expected flattened env vars to be: %s, got %s.", expectedValue, flattenedEnvVars)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -133,7 +133,15 @@ func DecompressOva(dir, src string) error {
|
|||
if hdr == nil || err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We use the fileinfo to get the file name because we are not
|
||||
// expecting path information as from the tar header. It's important
|
||||
// that we not use the path name from the tar header without checking
|
||||
// for the presence of `..`. If we accidentally allow for that, we can
|
||||
// open ourselves up to a path traversal vulnerability.
|
||||
info := hdr.FileInfo()
|
||||
|
||||
// Shouldn't be any directories, skip them
|
||||
|
|
|
@ -1,9 +1,27 @@
|
|||
package vagrant
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVBoxProvider_impl(t *testing.T) {
|
||||
var _ Provider = new(VBoxProvider)
|
||||
}
|
||||
|
||||
func TestDecomressOVA(t *testing.T) {
|
||||
td, err := ioutil.TempDir("", "pp-vagrant-virtualbox")
|
||||
assert.NoError(t, err)
|
||||
fixture := "../../common/test-fixtures/decompress-tar/outside_parent.tar"
|
||||
err = DecompressOva(td, fixture)
|
||||
assert.NoError(t, err)
|
||||
_, err = os.Stat(filepath.Join(filepath.Base(td), "demo.poc"))
|
||||
assert.Error(t, err)
|
||||
_, err = os.Stat(filepath.Join(td, "demo.poc"))
|
||||
assert.NoError(t, err)
|
||||
os.RemoveAll(td)
|
||||
}
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
package shell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer/packer"
|
||||
)
|
||||
|
||||
func TestCommunicator_impl(t *testing.T) {
|
||||
var _ packer.Communicator = new(Communicator)
|
||||
}
|
||||
|
||||
func TestCommunicator(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("windows not supported for this test")
|
||||
return
|
||||
}
|
||||
|
||||
c := &Communicator{
|
||||
ExecuteCommand: []string{"/bin/sh", "-c", "{{.Command}}"},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
cmd := &packer.RemoteCmd{
|
||||
Command: "echo foo",
|
||||
Stdout: &buf,
|
||||
}
|
||||
|
||||
if err := c.Start(cmd); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
if cmd.ExitStatus != 0 {
|
||||
t.Fatalf("err bad exit status: %d", cmd.ExitStatus)
|
||||
}
|
||||
|
||||
if strings.TrimSpace(buf.String()) != "foo" {
|
||||
t.Fatalf("bad: %s", buf.String())
|
||||
}
|
||||
}
|
|
@ -1,105 +1,32 @@
|
|||
package shell
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/hashicorp/packer/common"
|
||||
"github.com/hashicorp/packer/helper/config"
|
||||
sl "github.com/hashicorp/packer/common/shell-local"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
|
||||
// Command is the command to execute
|
||||
Command string
|
||||
|
||||
// ExecuteCommand is the command used to execute the command.
|
||||
ExecuteCommand []string `mapstructure:"execute_command"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type Provisioner struct {
|
||||
config Config
|
||||
config sl.Config
|
||||
}
|
||||
|
||||
func (p *Provisioner) Prepare(raws ...interface{}) error {
|
||||
err := config.Decode(&p.config, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &p.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"execute_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
err := sl.Decode(&p.config, raws...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(p.config.ExecuteCommand) == 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
p.config.ExecuteCommand = []string{
|
||||
"cmd",
|
||||
"/C",
|
||||
"{{.Command}}",
|
||||
}
|
||||
} else {
|
||||
p.config.ExecuteCommand = []string{
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"{{.Command}}",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var errs *packer.MultiError
|
||||
if p.config.Command == "" {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("command must be specified"))
|
||||
}
|
||||
|
||||
if len(p.config.ExecuteCommand) == 0 {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
errors.New("execute_command must not be empty"))
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return errs
|
||||
err = sl.Validate(&p.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error {
|
||||
// Make another communicator for local
|
||||
comm := &Communicator{
|
||||
Ctx: p.config.ctx,
|
||||
ExecuteCommand: p.config.ExecuteCommand,
|
||||
}
|
||||
|
||||
// Build the remote command
|
||||
cmd := &packer.RemoteCmd{Command: p.config.Command}
|
||||
|
||||
ui.Say(fmt.Sprintf(
|
||||
"Executing local command: %s",
|
||||
p.config.Command))
|
||||
if err := cmd.StartWithUi(comm, ui); err != nil {
|
||||
return fmt.Errorf(
|
||||
"Error executing command: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
p.config.Command)
|
||||
}
|
||||
if cmd.ExitStatus != 0 {
|
||||
return fmt.Errorf(
|
||||
"Erroneous exit code %d while executing command: %s\n\n"+
|
||||
"Please see output above for more information.",
|
||||
cmd.ExitStatus,
|
||||
p.config.Command)
|
||||
_, retErr := sl.Run(ui, &p.config)
|
||||
if retErr != nil {
|
||||
return retErr
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -56,8 +56,9 @@ This simple parsing example:
|
|||
is directly mapped to:
|
||||
|
||||
```go
|
||||
if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(jwt.MapClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
@ -1,11 +1,15 @@
|
|||
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
|
||||
# jwt-go
|
||||
|
||||
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
|
||||
|
||||
**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
|
||||
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
|
||||
|
||||
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
|
||||
**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3.
|
||||
|
||||
**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
|
||||
|
||||
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
|
@ -25,8 +29,8 @@ This library supports the parsing and verification as well as the generation and
|
|||
|
||||
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
|
||||
|
||||
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac)
|
||||
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac)
|
||||
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
|
||||
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
|
||||
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
|
||||
|
||||
## Extensions
|
||||
|
@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig
|
|||
|
||||
## Compliance
|
||||
|
||||
This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
|
||||
This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
|
||||
|
||||
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
|
||||
|
||||
|
@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are
|
|||
|
||||
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
|
||||
|
||||
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
|
||||
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.
|
||||
|
||||
**BREAKING CHANGES:***
|
||||
* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
|
||||
|
||||
## Usage Tips
|
||||
|
||||
|
@ -68,18 +75,26 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab
|
|||
|
||||
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
|
||||
|
||||
### Signing Methods and Key Types
|
||||
|
||||
Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
|
||||
|
||||
* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
|
||||
* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
|
||||
* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
|
||||
|
||||
### JWT and OAuth
|
||||
|
||||
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
|
||||
|
||||
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
|
||||
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
|
||||
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
|
||||
|
||||
|
||||
## More
|
||||
|
||||
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
|
||||
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
|
||||
|
|
|
@ -1,5 +1,18 @@
|
|||
## `jwt-go` Version History
|
||||
|
||||
#### 3.2.0
|
||||
|
||||
* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
|
||||
* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
|
||||
* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
|
||||
* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
|
||||
|
||||
#### 3.1.0
|
||||
|
||||
* Improvements to `jwt` command line tool
|
||||
* Added `SkipClaimsValidation` option to `Parser`
|
||||
* Documentation updates
|
||||
|
||||
#### 3.0.0
|
||||
|
||||
* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
|
||||
|
|
|
@ -14,6 +14,7 @@ var (
|
|||
)
|
||||
|
||||
// Implements the ECDSA family of signing methods signing methods
|
||||
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
|
||||
type SigningMethodECDSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
|
|
|
@ -51,13 +51,9 @@ func (e ValidationError) Error() string {
|
|||
} else {
|
||||
return "token is invalid"
|
||||
}
|
||||
return e.Inner.Error()
|
||||
}
|
||||
|
||||
// No errors
|
||||
func (e *ValidationError) valid() bool {
|
||||
if e.Errors > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return e.Errors == 0
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
)
|
||||
|
||||
// Implements the HMAC-SHA family of signing methods signing methods
|
||||
// Expects key type of []byte for both signing and validation
|
||||
type SigningMethodHMAC struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
|
@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string,
|
|||
return EncodeSegment(hasher.Sum(nil)), nil
|
||||
}
|
||||
|
||||
return "", ErrInvalidKey
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
|
|
@ -8,8 +8,9 @@ import (
|
|||
)
|
||||
|
||||
type Parser struct {
|
||||
ValidMethods []string // If populated, only these methods will be considered valid
|
||||
UseJSONNumber bool // Use JSON Number format in JSON decoder
|
||||
ValidMethods []string // If populated, only these methods will be considered valid
|
||||
UseJSONNumber bool // Use JSON Number format in JSON decoder
|
||||
SkipClaimsValidation bool // Skip claims validation during token parsing
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
|
@ -20,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
|||
}
|
||||
|
||||
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
parts := strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
var err error
|
||||
token := &Token{Raw: tokenString}
|
||||
|
||||
// parse Header
|
||||
var headerBytes []byte
|
||||
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
|
||||
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
|
||||
return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
|
||||
}
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// parse Claims
|
||||
var claimBytes []byte
|
||||
token.Claims = claims
|
||||
|
||||
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
|
||||
if p.UseJSONNumber {
|
||||
dec.UseNumber()
|
||||
}
|
||||
// JSON Decode. Special case for map type to avoid weird pointer behavior
|
||||
if c, ok := token.Claims.(MapClaims); ok {
|
||||
err = dec.Decode(&c)
|
||||
} else {
|
||||
err = dec.Decode(&claims)
|
||||
}
|
||||
// Handle decode error
|
||||
token, parts, err := p.ParseUnverified(tokenString, claims)
|
||||
if err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// Lookup signature method
|
||||
if method, ok := token.Header["alg"].(string); ok {
|
||||
if token.Method = GetSigningMethod(method); token.Method == nil {
|
||||
return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
|
||||
}
|
||||
} else {
|
||||
return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
|
||||
return token, err
|
||||
}
|
||||
|
||||
// Verify signing method is in the required set
|
||||
|
@ -95,20 +50,25 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
|
|||
}
|
||||
if key, err = keyFunc(token); err != nil {
|
||||
// keyFunc returned an error
|
||||
if ve, ok := err.(*ValidationError); ok {
|
||||
return token, ve
|
||||
}
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
|
||||
}
|
||||
|
||||
vErr := &ValidationError{}
|
||||
|
||||
// Validate Claims
|
||||
if err := token.Claims.Valid(); err != nil {
|
||||
if !p.SkipClaimsValidation {
|
||||
if err := token.Claims.Valid(); err != nil {
|
||||
|
||||
// If the Claims Valid returned an error, check if it is a validation error,
|
||||
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
|
||||
if e, ok := err.(*ValidationError); !ok {
|
||||
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
|
||||
} else {
|
||||
vErr = e
|
||||
// If the Claims Valid returned an error, check if it is a validation error,
|
||||
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
|
||||
if e, ok := err.(*ValidationError); !ok {
|
||||
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
|
||||
} else {
|
||||
vErr = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -126,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
|
|||
|
||||
return token, vErr
|
||||
}
|
||||
|
||||
// WARNING: Don't use this method unless you know what you're doing
|
||||
//
|
||||
// This method parses the token but doesn't validate the signature. It's only
|
||||
// ever useful in cases where you know the signature is valid (because it has
|
||||
// been checked previously in the stack) and you want to extract values from
|
||||
// it.
|
||||
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
|
||||
parts = strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
token = &Token{Raw: tokenString}
|
||||
|
||||
// parse Header
|
||||
var headerBytes []byte
|
||||
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
|
||||
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
|
||||
return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
|
||||
}
|
||||
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
|
||||
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// parse Claims
|
||||
var claimBytes []byte
|
||||
token.Claims = claims
|
||||
|
||||
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
|
||||
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
|
||||
if p.UseJSONNumber {
|
||||
dec.UseNumber()
|
||||
}
|
||||
// JSON Decode. Special case for map type to avoid weird pointer behavior
|
||||
if c, ok := token.Claims.(MapClaims); ok {
|
||||
err = dec.Decode(&c)
|
||||
} else {
|
||||
err = dec.Decode(&claims)
|
||||
}
|
||||
// Handle decode error
|
||||
if err != nil {
|
||||
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// Lookup signature method
|
||||
if method, ok := token.Header["alg"].(string); ok {
|
||||
if token.Method = GetSigningMethod(method); token.Method == nil {
|
||||
return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
|
||||
}
|
||||
} else {
|
||||
return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
|
||||
}
|
||||
|
||||
return token, parts, nil
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
)
|
||||
|
||||
// Implements the RSA family of signing methods signing methods
|
||||
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
|
||||
type SigningMethodRSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
|
@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string {
|
|||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this signing method, must be an rsa.PublicKey structure.
|
||||
// For this signing method, must be an *rsa.PublicKey structure.
|
||||
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
|
@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface
|
|||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, must be an rsa.PrivateKey structure.
|
||||
// For this signing method, must be an *rsa.PrivateKey structure.
|
||||
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
var ok bool
|
||||
|
|
|
@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
|
|||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
|
||||
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
var parsedKey interface{}
|
||||
|
||||
var blockDecrypted []byte
|
||||
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
|
||||
return nil, ErrNotRSAPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 public key
|
||||
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
|
||||
var err error
|
||||
|
|
|
@ -152,10 +152,20 @@ func (c *Client) RunWithString(command string, stdin string) (string, string, in
|
|||
}
|
||||
|
||||
var outWriter, errWriter bytes.Buffer
|
||||
go io.Copy(&outWriter, cmd.Stdout)
|
||||
go io.Copy(&errWriter, cmd.Stderr)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
io.Copy(&outWriter, cmd.Stdout)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
io.Copy(&errWriter, cmd.Stderr)
|
||||
}()
|
||||
|
||||
cmd.Wait()
|
||||
wg.Wait()
|
||||
|
||||
return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err
|
||||
}
|
||||
|
@ -176,11 +186,24 @@ func (c Client) RunWithInput(command string, stdout, stderr io.Writer, stdin io.
|
|||
return 1, err
|
||||
}
|
||||
|
||||
go io.Copy(cmd.Stdin, stdin)
|
||||
go io.Copy(stdout, cmd.Stdout)
|
||||
go io.Copy(stderr, cmd.Stderr)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
io.Copy(cmd.Stdin, stdin)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
io.Copy(stdout, cmd.Stdout)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
io.Copy(stderr, cmd.Stderr)
|
||||
}()
|
||||
|
||||
cmd.Wait()
|
||||
wg.Wait()
|
||||
|
||||
return cmd.ExitCode(), cmd.err
|
||||
|
||||
|
|
|
@ -629,10 +629,11 @@
|
|||
"revisionTime": "2017-11-27T16:20:29Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "D37uI+U+FYvTJIdG2TTozXe7i7U=",
|
||||
"comment": "v3.0.0",
|
||||
"checksumSHA1": "4772zXrOaPVeDeSgdiV7Vp4KEjk=",
|
||||
"comment": "v3.2.0",
|
||||
"path": "github.com/dgrijalva/jwt-go",
|
||||
"revision": "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
|
||||
"revision": "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e",
|
||||
"revisionTime": "2018-03-08T23:13:08Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "W1LGm0UNirwMDVCMFv5vZrOpUJI=",
|
||||
|
@ -988,16 +989,16 @@
|
|||
"revision": "95ba30457eb1121fa27753627c774c7cd4e90083"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "8z5kCCFRsBkhXic9jxxeIV3bBn8=",
|
||||
"checksumSHA1": "dVQEUn5TxdIAXczK7rh6qUrq44Q=",
|
||||
"path": "github.com/masterzen/winrm",
|
||||
"revision": "a2df6b1315e6fd5885eb15c67ed259e85854125f",
|
||||
"revisionTime": "2017-08-14T13:39:27Z"
|
||||
"revision": "7e40f93ae939004a1ef3bd5ff5c88c756ee762bb",
|
||||
"revisionTime": "2018-02-24T16:03:50Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "XFSXma+KmkhkIPsh4dTd/eyja5s=",
|
||||
"path": "github.com/masterzen/winrm/soap",
|
||||
"revision": "a2df6b1315e6fd5885eb15c67ed259e85854125f",
|
||||
"revisionTime": "2017-08-14T13:39:27Z"
|
||||
"revision": "7e40f93ae939004a1ef3bd5ff5c88c756ee762bb",
|
||||
"revisionTime": "2018-02-24T16:03:50Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "NkbetqlpWBi3gP08JDneC+axTKw=",
|
||||
|
|
|
@ -213,8 +213,10 @@ each category, the available configuration keys are alphabetized.
|
|||
where the `.Device` variable is replaced with the name of the device where
|
||||
the volume is attached.
|
||||
|
||||
- `mount_partition` (number) - The partition number containing the
|
||||
/ partition. By default this is the first partition of the volume.
|
||||
- `mount_partition` (string) - The partition number containing the
|
||||
/ partition. By default this is the first partition of the volume, (for
|
||||
example, `xvda1`) but you can designate the entire block device by setting
|
||||
`"mount_partition": "0"` in your config, which will mount `xvda` instead.
|
||||
|
||||
- `mount_options` (array of strings) - Options to supply the `mount` command
|
||||
when mounting devices. Each option will be prefixed with `-o` and supplied
|
||||
|
@ -223,6 +225,14 @@ each category, the available configuration keys are alphabetized.
|
|||
command](http://linuxcommand.org/man_pages/mount8.html) for valid file
|
||||
system specific options
|
||||
|
||||
- `nvme_device_path` (string) - When we call the mount command (by default
|
||||
`mount -o device dir`), the string provided in `nvme_mount_path` will
|
||||
replace `device` in that command. When this option is not set, `device` in
|
||||
that command will be something like `/dev/sdf1`, mirroring the attached
|
||||
device name. This assumption works for most instances but will fail with c5
|
||||
and m5 instances. In order to use the chroot builder with c5 and m5
|
||||
instances, you must manually set `nvme_device_path` and `device_path`.
|
||||
|
||||
- `pre_mount_commands` (array of strings) - A series of commands to execute
|
||||
after attaching the root volume and before mounting the chroot. This is not
|
||||
required unless using `from_scratch`. If so, this should include any
|
||||
|
@ -291,9 +301,9 @@ each category, the available configuration keys are alphabetized.
|
|||
This is most useful for selecting a daily distro build.
|
||||
|
||||
You may set this in place of `source_ami` or in conjunction with it. If you
|
||||
set this in conjunction with `source_ami`, the `source_ami` will be added to
|
||||
set this in conjunction with `source_ami`, the `source_ami` will be added to
|
||||
the filter. The provided `source_ami` must meet all of the filtering criteria
|
||||
provided in `source_ami_filter`; this pins the AMI returned by the filter,
|
||||
provided in `source_ami_filter`; this pins the AMI returned by the filter,
|
||||
but will cause Packer to fail if the `source_ami` does not exist.
|
||||
|
||||
- `sriov_support` (boolean) - Enable enhanced networking (SriovNetSupport but not ENA)
|
||||
|
@ -368,6 +378,7 @@ its internals such as finding an available device.
|
|||
|
||||
## Gotchas
|
||||
|
||||
### Unmounting the Filesystem
|
||||
One of the difficulties with using the chroot builder is that your provisioning
|
||||
scripts must not leave any processes running or packer will be unable to unmount
|
||||
the filesystem.
|
||||
|
@ -397,6 +408,53 @@ services:
|
|||
}
|
||||
```
|
||||
|
||||
### Using Instances with NVMe block devices.
|
||||
In C5, C5d, M5, and i3.metal instances, EBS volumes are exposed as NVMe block
|
||||
devices [reference](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html).
|
||||
In order to correctly mount these devices, you have to do some extra legwork,
|
||||
involving the `nvme_device_path` option above. Read that for more information.
|
||||
|
||||
A working example for mounting an NVMe device is below:
|
||||
|
||||
```
|
||||
{
|
||||
"variables": {
|
||||
"region" : "us-east-2"
|
||||
},
|
||||
"builders": [
|
||||
{
|
||||
"type": "amazon-chroot",
|
||||
"region": "{{user `region`}}",
|
||||
"source_ami_filter": {
|
||||
"filters": {
|
||||
"virtualization-type": "hvm",
|
||||
"name": "amzn-ami-hvm-*",
|
||||
"root-device-type": "ebs"
|
||||
},
|
||||
"owners": ["137112412989"],
|
||||
"most_recent": true
|
||||
},
|
||||
"ena_support": true,
|
||||
"ami_name": "amazon-chroot-test-{{timestamp}}",
|
||||
"nvme_device_path": "/dev/nvme1n1p",
|
||||
"device_path": "/dev/sdf"
|
||||
}
|
||||
],
|
||||
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": ["echo Test > /tmp/test.txt"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Note that in the `nvme_device_path` you must end with the `p`; if you try to
|
||||
define the partition in this path (e.g. "nvme_device_path": `/dev/nvme1n1p1`)
|
||||
and haven't also set the `"mount_partition": 0`, a `1` will be appended to the
|
||||
`nvme_device_path` and Packer will fail.
|
||||
|
||||
## Building From Scratch
|
||||
|
||||
This example demonstrates the essentials of building an image from scratch. A
|
||||
|
|
|
@ -169,6 +169,30 @@ builder.
|
|||
Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's
|
||||
documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.
|
||||
|
||||
- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
|
||||
instance to burst additional CPU beyond its available [CPU Credits]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
|
||||
for as long as the demand exists.
|
||||
This is in contrast to the standard configuration that only allows an
|
||||
instance to consume up to its available CPU Credits.
|
||||
See the AWS documentation for [T2 Unlimited]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
|
||||
and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
|
||||
Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
|
||||
information.
|
||||
By default this option is disabled and Packer will set up a [T2
|
||||
Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
|
||||
instance instead.
|
||||
|
||||
To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
|
||||
Additionally, T2 Unlimited cannot be used in conjunction with Spot
|
||||
Instances e.g. when the `spot_price` option has been configured.
|
||||
Attempting to do so will cause an error.
|
||||
|
||||
!> **Warning!** Additional costs may be incurred by enabling T2
|
||||
Unlimited - even for instances that would usually qualify for the
|
||||
[AWS Free Tier](https://aws.amazon.com/free/).
|
||||
|
||||
- `force_deregister` (boolean) - Force Packer to first deregister an existing
|
||||
AMI if one with the same name already exists. Default `false`.
|
||||
|
||||
|
|
|
@ -162,6 +162,30 @@ builder.
|
|||
Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's
|
||||
documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.
|
||||
|
||||
- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
|
||||
instance to burst additional CPU beyond its available [CPU Credits]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
|
||||
for as long as the demand exists.
|
||||
This is in contrast to the standard configuration that only allows an
|
||||
instance to consume up to its available CPU Credits.
|
||||
See the AWS documentation for [T2 Unlimited]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
|
||||
and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
|
||||
Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
|
||||
information.
|
||||
By default this option is disabled and Packer will set up a [T2
|
||||
Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
|
||||
instance instead.
|
||||
|
||||
To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
|
||||
Additionally, T2 Unlimited cannot be used in conjunction with Spot
|
||||
Instances e.g. when the `spot_price` option has been configured.
|
||||
Attempting to do so will cause an error.
|
||||
|
||||
!> **Warning!** Additional costs may be incurred by enabling T2
|
||||
Unlimited - even for instances that would usually qualify for the
|
||||
[AWS Free Tier](https://aws.amazon.com/free/).
|
||||
|
||||
- `force_deregister` (boolean) - Force Packer to first deregister an existing
|
||||
AMI if one with the same name already exists. Default `false`.
|
||||
|
||||
|
|
|
@ -120,6 +120,30 @@ builder.
|
|||
Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's
|
||||
documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.
|
||||
|
||||
- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
|
||||
instance to burst additional CPU beyond its available [CPU Credits]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
|
||||
for as long as the demand exists.
|
||||
This is in contrast to the standard configuration that only allows an
|
||||
instance to consume up to its available CPU Credits.
|
||||
See the AWS documentation for [T2 Unlimited]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
|
||||
and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
|
||||
Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
|
||||
information.
|
||||
By default this option is disabled and Packer will set up a [T2
|
||||
Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
|
||||
instance instead.
|
||||
|
||||
To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
|
||||
Additionally, T2 Unlimited cannot be used in conjunction with Spot
|
||||
Instances e.g. when the `spot_price` option has been configured.
|
||||
Attempting to do so will cause an error.
|
||||
|
||||
!> **Warning!** Additional costs may be incurred by enabling T2
|
||||
Unlimited - even for instances that would usually qualify for the
|
||||
[AWS Free Tier](https://aws.amazon.com/free/).
|
||||
|
||||
- `iam_instance_profile` (string) - The name of an [IAM instance
|
||||
profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
|
||||
to launch the EC2 instance with.
|
||||
|
|
|
@ -193,6 +193,30 @@ builder.
|
|||
Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's
|
||||
documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.
|
||||
|
||||
- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
|
||||
instance to burst additional CPU beyond its available [CPU Credits]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
|
||||
for as long as the demand exists.
|
||||
This is in contrast to the standard configuration that only allows an
|
||||
instance to consume up to its available CPU Credits.
|
||||
See the AWS documentation for [T2 Unlimited]
|
||||
(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
|
||||
and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
|
||||
Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
|
||||
information.
|
||||
By default this option is disabled and Packer will set up a [T2
|
||||
Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
|
||||
instance instead.
|
||||
|
||||
To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
|
||||
Additionally, T2 Unlimited cannot be used in conjunction with Spot
|
||||
Instances e.g. when the `spot_price` option has been configured.
|
||||
Attempting to do so will cause an error.
|
||||
|
||||
!> **Warning!** Additional costs may be incurred by enabling T2
|
||||
Unlimited - even for instances that would usually qualify for the
|
||||
[AWS Free Tier](https://aws.amazon.com/free/).
|
||||
|
||||
- `force_deregister` (boolean) - Force Packer to first deregister an existing
|
||||
AMI if one with the same name already exists. Defaults to `false`.
|
||||
|
||||
|
|
|
@ -17,8 +17,6 @@ In order to build VMs in Azure Packer needs 6 configuration options to be specif
|
|||
|
||||
- `client_secret` - service principal secret / password
|
||||
|
||||
- `object_id` - service principal object id (OSType = Windows Only)
|
||||
|
||||
- `resource_group_name` - name of the resource group where your VHD(s) will be stored
|
||||
|
||||
- `storage_account` - name of the storage account where your VHD(s) will be stored
|
||||
|
@ -31,8 +29,7 @@ In order to get all of the items above, you will need a username and password fo
|
|||
|
||||
Device login is an alternative way to authorize in Azure Packer. Device login only requires you to know your
|
||||
Subscription ID. (Device login is only supported for Linux based VMs.) Device login is intended for those who are first
|
||||
time users, and just want to "kick the tires." We recommend the SPN approach if you intend to automate Packer, or for
|
||||
deploying Windows VMs.
|
||||
time users, and just want to "kick the tires." We recommend the SPN approach if you intend to automate Packer.
|
||||
|
||||
> Device login is for **interactive** builds, and SPN is **automated** builds.
|
||||
|
||||
|
@ -44,12 +41,13 @@ There are three pieces of information you must provide to enable device login mo
|
|||
|
||||
> Device login mode is enabled by not setting client\_id and client\_secret.
|
||||
|
||||
> Device login mode is for the Public and US Gov clouds only, and Linux VMs only.
|
||||
> Device login mode is for the Public and US Gov clouds only.
|
||||
|
||||
The device login flow asks that you open a web browser, navigate to <http://aka.ms/devicelogin>, and input the supplied
|
||||
code. This authorizes the Packer for Azure application to act on your behalf. An OAuth token will be created, and stored
|
||||
in the user's home directory (~/.azure/packer/oauth-TenantID.json). This token is used if the token file exists, and it
|
||||
is refreshed as necessary. The token file prevents the need to continually execute the device login flow.
|
||||
is refreshed as necessary. The token file prevents the need to continually execute the device login flow. Packer will ask
|
||||
for two device login auth, one for service management endpoint and another for accessing temp keyvault secrets that it creates.
|
||||
|
||||
## Install the Azure CLI
|
||||
|
||||
|
|
|
@ -140,11 +140,6 @@ Providing `temp_resource_group_name` or `location` in combination with `build_re
|
|||
account type for a managed image. Valid values are Standard_LRS
|
||||
and Premium\_LRS. The default is Standard\_LRS.
|
||||
|
||||
- `object_id` (string) Specify an OAuth Object ID to protect WinRM certificates
|
||||
created at runtime. This variable is required when creating images based on
|
||||
Windows; this variable is not used by non-Windows builds. See `Windows`
|
||||
behavior for `os_type`, below.
|
||||
|
||||
- `os_disk_size_gb` (number) Specify the size of the OS disk in GB (gigabytes). Values of zero or less than zero are
|
||||
ignored.
|
||||
|
||||
|
@ -220,6 +215,9 @@ Providing `temp_resource_group_name` or `location` in combination with `build_re
|
|||
|
||||
CLI example `azure vm sizes -l westus`
|
||||
|
||||
- `async_resourcegroup_delete` (boolean) If you want packer to delete the temporary resource group asynchronously set this value. It's a boolean value
|
||||
and defaults to false. **Important** Setting this true means that your builds are faster, however any failed deletes are not reported.
|
||||
|
||||
## Basic Example
|
||||
|
||||
Here is a basic example for Azure.
|
||||
|
@ -409,8 +407,6 @@ A Windows build requires two templates and two deployments. Unfortunately, the K
|
|||
the same time hence the need for two templates and deployments. The time required to deploy a KeyVault template is
|
||||
minimal, so overall impact is small.
|
||||
|
||||
> The KeyVault certificate is protected using the object\_id of the SPN. This is why Windows builds require object\_id,
|
||||
> and an SPN. The KeyVault is deleted when the resource group is deleted.
|
||||
|
||||
See the [examples/azure](https://github.com/hashicorp/packer/tree/master/examples/azure) folder in the packer project
|
||||
for more examples.
|
||||
|
|
|
@ -102,6 +102,11 @@ can be configured for this builder.
|
|||
- `differencing_disk` (boolean) - If true enables differencing disks. Only the changes will be written to the new disk. This is especially useful if your
|
||||
source is a vhd/vhdx. This defaults to false.
|
||||
|
||||
- `headless` (boolean) - Packer defaults to building Hyper-V virtual
|
||||
machines by launching a GUI that shows the console of the machine
|
||||
being built. When this value is set to true, the machine will start without
|
||||
a console.
|
||||
|
||||
- `skip_export` (boolean) - If true skips VM export. If you are interested only in the vhd/vhdx files, you can enable this option. This will create
|
||||
inline disks which improves the build performance. There will not be any copying of source vhds to temp directory. This defaults to false.
|
||||
|
||||
|
|
|
@ -139,6 +139,11 @@ can be configured for this builder.
|
|||
- `guest_additions_path` (string) - The path to the iso image for guest
|
||||
additions.
|
||||
|
||||
- `headless` (boolean) - Packer defaults to building Hyper-V virtual
|
||||
machines by launching a GUI that shows the console of the machine
|
||||
being built. When this value is set to true, the machine will start without
|
||||
a console.
|
||||
|
||||
- `http_directory` (string) - Path to a directory to serve using an HTTP
|
||||
server. The files in this directory will be available over HTTP that will
|
||||
be requestable from the virtual machine. This is useful for hosting
|
||||
|
|
|
@ -335,7 +335,10 @@ all typed in sequence. It is an array only to improve readability within the
|
|||
template.
|
||||
|
||||
The boot command is sent to the VM through the `VBoxManage` utility in as few
|
||||
invocations as possible.
|
||||
invocations as possible. We send each character in groups of 25, with a default
|
||||
delay of 100ms between groups. The delay alleviates issues with latency and CPU
|
||||
contention. If you notice missing keys, you can tune this delay by specifying e.g.
|
||||
`PACKER_KEY_INTERVAL=500ms` to wait longer between each group of characters.
|
||||
|
||||
<%= partial "partials/builders/boot-command" %>
|
||||
|
||||
|
|
|
@ -298,7 +298,10 @@ all typed in sequence. It is an array only to improve readability within the
|
|||
template.
|
||||
|
||||
The boot command is sent to the VM through the `VBoxManage` utility in as few
|
||||
invocations as possible.
|
||||
invocations as possible. We send each character in groups of 25, with a default
|
||||
delay of 100ms between groups. The delay alleviates issues with latency and CPU
|
||||
contention. If you notice missing keys, you can tune this delay by specifying e.g.
|
||||
`PACKER_KEY_INTERVAL=500ms` to wait longer between each group of characters.
|
||||
|
||||
<%= partial "partials/builders/boot-command" %>
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ Type: `shell-local`
|
|||
|
||||
The local shell post processor executes scripts locally during the post
|
||||
processing stage. Shell local provides a convenient way to automate executing
|
||||
some task with the packer outputs.
|
||||
some task with packer outputs and variables.
|
||||
|
||||
## Basic example
|
||||
|
||||
|
@ -33,6 +33,9 @@ required element is either "inline" or "script". Every other option is optional.
|
|||
|
||||
Exactly *one* of the following is required:
|
||||
|
||||
- `command` (string) - This is a single command to execute. It will be written
|
||||
to a temporary file and run using the `execute_command` call below.
|
||||
|
||||
- `inline` (array of strings) - This is an array of commands to execute. The
|
||||
commands are concatenated by newlines and turned into a single file, so they
|
||||
are all executed within the same context. This allows you to change
|
||||
|
@ -52,15 +55,34 @@ Exactly *one* of the following is required:
|
|||
Optional parameters:
|
||||
|
||||
- `environment_vars` (array of strings) - An array of key/value pairs to
|
||||
inject prior to the execute\_command. The format should be `key=value`.
|
||||
inject prior to the `execute_command`. The format should be `key=value`.
|
||||
Packer injects some environmental variables by default into the environment,
|
||||
as well, which are covered in the section below.
|
||||
|
||||
- `execute_command` (string) - The command to use to execute the script. By
|
||||
default this is `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`.
|
||||
The value of this is treated as [template engine](/docs/templates/engine.html).
|
||||
- `execute_command` (array of strings) - The command used to execute the script. By
|
||||
default this is `["/bin/sh", "-c", "{{.Vars}} {{.Script}}"]`
|
||||
on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows.
|
||||
This is treated as a [template engine](/docs/templates/engine.html).
|
||||
There are two available variables: `Script`, which is the path to the script
|
||||
to run, `Vars`, which is the list of `environment_vars`, if configured.
|
||||
to run, and `Vars`, which is the list of `environment_vars`, if configured.
|
||||
If you choose to set this option, make sure that the first element in the
|
||||
array is the shell program you want to use (for example, "sh" or
|
||||
"/usr/local/bin/zsh" or even "powershell.exe" although anything other than
|
||||
a flavor of the shell command language is not explicitly supported and may
|
||||
be broken by assumptions made within Packer). It's worth noting that if you
|
||||
choose to try to use shell-local for Powershell or other Windows commands,
|
||||
the environment variables will not be set properly for your environment.
|
||||
|
||||
For backwards compatibility, `execute_command` will accept a string instead
|
||||
of an array of strings. If a single string or an array of strings with only
|
||||
one element is provided, Packer will replicate past behavior by appending
|
||||
your `execute_command` to the array of strings `["sh", "-c"]`. For example,
|
||||
if you set `"execute_command": "foo bar"`, the final `execute_command` that
|
||||
Packer runs will be ["sh", "-c", "foo bar"]. If you set `"execute_command": ["foo", "bar"]`,
|
||||
the final execute_command will remain `["foo", "bar"]`.
|
||||
|
||||
Again, the above is only provided as a backwards compatibility fix; we
|
||||
strongly recommend that you set execute_command as an array of strings.
|
||||
|
||||
- `inline_shebang` (string) - The
|
||||
[shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when
|
||||
|
@ -69,13 +91,72 @@ Optional parameters:
|
|||
**Important:** If you customize this, be sure to include something like the
|
||||
`-e` flag, otherwise individual steps failing won't fail the provisioner.
|
||||
|
||||
## Execute Command Example
|
||||
- `use_linux_pathing` (bool) - This is only relevant to windows hosts. If you
|
||||
are running Packer in a Windows environment with the Windows Subsystem for
|
||||
Linux feature enabled, and would like to invoke a bash script rather than
|
||||
invoking a Cmd script, you'll need to set this flag to true; it tells Packer
|
||||
to use the linux subsystem path for your script rather than the Windows path.
|
||||
(e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). Please see
|
||||
the example below for more guidance on how to use this feature. If you are
|
||||
not on a Windows host, or you do not intend to use the shell-local
|
||||
post-processor to run a bash script, please ignore this option.
|
||||
If you set this flag to true, you still need to provide the standard windows
|
||||
path to the script when providing a `script`. This is a beta feature.
|
||||
|
||||
## Execute Command
|
||||
|
||||
To many new users, the `execute_command` is puzzling. However, it provides an
|
||||
important function: customization of how the command is executed. The most
|
||||
common use case for this is dealing with **sudo password prompts**. You may also
|
||||
need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD.
|
||||
|
||||
### The Windows Linux Subsystem
|
||||
|
||||
The shell-local post-processor was designed with the idea of allowing you to run
|
||||
commands in your local operating system's native shell. For Windows, we've
|
||||
assumed in our defaults that this is Cmd. However, it is possible to run a
|
||||
bash script as part of the Windows Linux Subsystem from the shell-local
|
||||
post-processor, by modifying the `execute_command` and the `use_linux_pathing`
|
||||
options in the post-processor config.
|
||||
|
||||
The example below is a fully functional test config.
|
||||
|
||||
One limitation of this offering is that "inline" and "command" options are not
|
||||
available to you; please limit yourself to using the "script" or "scripts"
|
||||
options instead.
|
||||
|
||||
Please note that this feature is still in beta, as the underlying WSL is also
|
||||
still in beta. There will be some limitations as a result. For example, it will
|
||||
likely not work unless both Packer and the scripts you want to run are both on
|
||||
the C drive.
|
||||
|
||||
```
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest1"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"scripts": ["C:/Users/me/scripts/example_bash.sh"]
|
||||
},
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest2"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"script": "C:/Users/me/scripts/example_bash.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Default Environmental Variables
|
||||
|
||||
In addition to being able to specify custom environmental variables using the
|
||||
|
@ -150,3 +231,106 @@ are cleaned up.
|
|||
|
||||
For a shell script, that means the script **must** exit with a zero code. You
|
||||
*must* be extra careful to `exit 0` when necessary.
|
||||
|
||||
|
||||
## Usage Examples:
|
||||
|
||||
Example of running a .cmd file on windows:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest1"],
|
||||
"scripts": ["./scripts/test_cmd.cmd"]
|
||||
},
|
||||
```
|
||||
|
||||
Contents of "test_cmd.cmd":
|
||||
|
||||
```
|
||||
echo %SHELLLOCALTEST%
|
||||
```
|
||||
|
||||
Example of running an inline command on windows:
|
||||
Required customization: tempfile_extension
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest2"],
|
||||
"tempfile_extension": ".cmd",
|
||||
"inline": ["echo %SHELLLOCALTEST%"]
|
||||
},
|
||||
```
|
||||
|
||||
Example of running a bash command on windows using WSL:
|
||||
Required customizations: use_linux_pathing and execute_command
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest3"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"script": "./scripts/example_bash.sh"
|
||||
}
|
||||
```
|
||||
|
||||
Contents of "example_bash.sh":
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
echo $SHELLLOCALTEST
|
||||
```
|
||||
|
||||
Example of running a powershell script on windows:
|
||||
Required customizations: env_var_format and execute_command
|
||||
|
||||
```
|
||||
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest4"],
|
||||
"execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"],
|
||||
"env_var_format": "$env:%s=\"%s\"; ",
|
||||
"script": "./scripts/example_ps.ps1"
|
||||
}
|
||||
```
|
||||
|
||||
Example of running a powershell script on windows as "inline":
|
||||
Required customizations: env_var_format, tempfile_extension, and execute_command
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"tempfile_extension": ".ps1",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest5"],
|
||||
"execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"],
|
||||
"env_var_format": "$env:%s=\"%s\"; ",
|
||||
"inline": ["write-output $env:SHELLLOCALTEST"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Example of running a bash script on linux:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest1"],
|
||||
"scripts": ["./scripts/example_bash.sh"]
|
||||
}
|
||||
```
|
||||
|
||||
Example of running a bash "inline" on linux:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest2"],
|
||||
"inline": ["echo hello",
|
||||
"echo $PROVISIONERTEST"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
|
|
@ -37,10 +37,26 @@ The example below is fully functional.
|
|||
The reference of available configuration options is listed below. The only
|
||||
required element is "command".
|
||||
|
||||
Required:
|
||||
Exactly *one* of the following is required:
|
||||
|
||||
- `command` (string) - The command to execute. This will be executed within
|
||||
the context of a shell as specified by `execute_command`.
|
||||
- `command` (string) - This is a single command to execute. It will be written
|
||||
to a temporary file and run using the `execute_command` call below.
|
||||
|
||||
- `inline` (array of strings) - This is an array of commands to execute. The
|
||||
commands are concatenated by newlines and turned into a single file, so they
|
||||
are all executed within the same context. This allows you to change
|
||||
directories in one command and use something in the directory in the next
|
||||
and so on. Inline scripts are the easiest way to pull off simple tasks
|
||||
within the machine.
|
||||
|
||||
- `script` (string) - The path to a script to execute. This path can be
|
||||
absolute or relative. If it is relative, it is relative to the working
|
||||
directory when Packer is executed.
|
||||
|
||||
- `scripts` (array of strings) - An array of scripts to execute. The scripts
|
||||
will be executed in the order specified. Each script is executed in
|
||||
isolation, so state such as variables from one script won't carry on to the
|
||||
next.
|
||||
|
||||
Optional parameters:
|
||||
|
||||
|
@ -50,3 +66,235 @@ Optional parameters:
|
|||
treated as [configuration
|
||||
template](/docs/templates/engine.html). The only available
|
||||
variable is `Command` which is the command to execute.
|
||||
|
||||
- `environment_vars` (array of strings) - An array of key/value pairs to
|
||||
inject prior to the `execute_command`. The format should be `key=value`.
|
||||
Packer injects some environmental variables by default into the environment,
|
||||
as well, which are covered in the section below.
|
||||
|
||||
- `execute_command` (array of strings) - The command used to execute the script.
|
||||
By default this is `["/bin/sh", "-c", "{{.Vars}} {{.Script}}"]`
|
||||
on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows.
|
||||
This is treated as a [template engine](/docs/templates/engine.html).
|
||||
There are two available variables: `Script`, which is the path to the script
|
||||
to run, and `Vars`, which is the list of `environment_vars`, if configured
|
||||
|
||||
If you choose to set this option, make sure that the first element in the
|
||||
array is the shell program you want to use (for example, "sh"), and a later
|
||||
element in the array must be `{{.Script}}`.
|
||||
|
||||
This option provides you a great deal of flexibility. You may choose to
|
||||
provide your own shell program, for example "/usr/local/bin/zsh" or even
|
||||
"powershell.exe". However, with great power comes great responsibility -
|
||||
these commands are not officially supported and things like environment
|
||||
variables may not work if you use a different shell than the default.
|
||||
|
||||
For backwards compatibility, you may also use {{.Command}}, but it is
|
||||
decoded the same way as {{.Script}}. We recommend using {{.Script}} for the
|
||||
sake of clarity, as even when you set only a single `command` to run,
|
||||
Packer writes it to a temporary file and then runs it as a script.
|
||||
|
||||
- `inline_shebang` (string) - The
|
||||
[shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when
|
||||
running commands specified by `inline`. By default, this is `/bin/sh -e`. If
|
||||
you're not using `inline`, then this configuration has no effect.
|
||||
**Important:** If you customize this, be sure to include something like the
|
||||
`-e` flag, otherwise individual steps failing won't fail the provisioner.
|
||||
|
||||
- `use_linux_pathing` (bool) - This is only relevant to windows hosts. If you
|
||||
are running Packer in a Windows environment with the Windows Subsystem for
|
||||
Linux feature enabled, and would like to invoke a bash script rather than
|
||||
invoking a Cmd script, you'll need to set this flag to true; it tells Packer
|
||||
to use the linux subsystem path for your script rather than the Windows path.
|
||||
(e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). Please see
|
||||
the example below for more guidance on how to use this feature. If you are
|
||||
not on a Windows host, or you do not intend to use the shell-local
|
||||
provisioner to run a bash script, please ignore this option.
|
||||
|
||||
## Execute Command
|
||||
|
||||
To many new users, the `execute_command` is puzzling. However, it provides an
|
||||
important function: customization of how the command is executed. The most
|
||||
common use case for this is dealing with **sudo password prompts**. You may also
|
||||
need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD.
|
||||
|
||||
### The Windows Linux Subsystem
|
||||
|
||||
The shell-local provisioner was designed with the idea of allowing you to run
|
||||
commands in your local operating system's native shell. For Windows, we've
|
||||
assumed in our defaults that this is Cmd. However, it is possible to run a
|
||||
bash script as part of the Windows Linux Subsystem from the shell-local
|
||||
provisioner, by modifying the `execute_command` and the `use_linux_pathing`
|
||||
options in the provisioner config.
|
||||
|
||||
The example below is a fully functional test config.
|
||||
|
||||
One limitation of this offering is that "inline" and "command" options are not
|
||||
available to you; please limit yourself to using the "script" or "scripts"
|
||||
options instead.
|
||||
|
||||
Please note that the WSL is a beta feature, and this tool is not guaranteed to
|
||||
work as you expect it to.
|
||||
|
||||
```
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "null",
|
||||
"communicator": "none"
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest1"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"scripts": ["C:/Users/me/scripts/example_bash.sh"]
|
||||
},
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest2"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"script": "C:/Users/me/scripts/example_bash.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Default Environmental Variables
|
||||
|
||||
In addition to being able to specify custom environmental variables using the
|
||||
`environment_vars` configuration, the provisioner automatically defines certain
|
||||
commonly useful environmental variables:
|
||||
|
||||
- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running.
|
||||
This is most useful when Packer is making multiple builds and you want to
|
||||
distinguish them slightly from a common provisioning script.
|
||||
|
||||
- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the
|
||||
machine that the script is running on. This is useful if you want to run
|
||||
only certain parts of the script on systems built with certain builders.
|
||||
|
||||
## Safely Writing A Script
|
||||
|
||||
Whether you use the `inline` option, or pass it a direct `script` or `scripts`,
|
||||
it is important to understand a few things about how the shell-local
|
||||
provisioner works to run it safely and easily. This understanding will save
|
||||
you much time in the process.
|
||||
|
||||
### Once Per Builder
|
||||
|
||||
The `shell-local` script(s) you pass are run once per builder. That means that
|
||||
if you have an `amazon-ebs` builder and a `docker` builder, your script will be
|
||||
run twice. If you have 3 builders, it will run 3 times, once for each builder.
|
||||
|
||||
### Always Exit Intentionally
|
||||
|
||||
If any provisioner fails, the `packer build` stops and all interim artifacts
|
||||
are cleaned up.
|
||||
|
||||
For a shell script, that means the script **must** exit with a zero code. You
|
||||
*must* be extra careful to `exit 0` when necessary.
|
||||
|
||||
|
||||
## Usage Examples:
|
||||
|
||||
Example of running a .cmd file on windows:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest1"],
|
||||
"scripts": ["./scripts/test_cmd.cmd"]
|
||||
},
|
||||
```
|
||||
|
||||
Contents of "test_cmd.cmd":
|
||||
|
||||
```
|
||||
echo %SHELLLOCALTEST%
|
||||
```
|
||||
|
||||
Example of running an inline command on windows:
|
||||
Required customization: tempfile_extension
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest2"],
|
||||
"tempfile_extension": ".cmd",
|
||||
"inline": ["echo %SHELLLOCALTEST%"]
|
||||
},
|
||||
```
|
||||
|
||||
Example of running a bash command on windows using WSL:
|
||||
Required customizations: use_linux_pathing and execute_command
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest3"],
|
||||
"execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"],
|
||||
"use_linux_pathing": true,
|
||||
"script": "./scripts/example_bash.sh"
|
||||
}
|
||||
```
|
||||
|
||||
Contents of "example_bash.sh":
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
echo $SHELLLOCALTEST
|
||||
```
|
||||
|
||||
Example of running a powershell script on windows:
|
||||
Required customizations: env_var_format and execute_command
|
||||
|
||||
```
|
||||
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest4"],
|
||||
"execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"],
|
||||
"env_var_format": "$env:%s=\"%s\"; ",
      "script": "./scripts/example_ps.ps1"
|
||||
}
|
||||
```
|
||||
|
||||
Example of running a powershell script on windows as "inline":
|
||||
Required customizations: env_var_format, tempfile_extension, and execute_command
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"tempfile_extension": ".ps1",
|
||||
"environment_vars": ["SHELLLOCALTEST=ShellTest5"],
|
||||
"execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"],
|
||||
"env_var_format": "$env:%s=\"%s\"; ",
|
||||
"inline": ["write-output $env:SHELLLOCALTEST"]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Example of running a bash script on linux:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest1"],
|
||||
"scripts": ["./scripts/dummy_bash.sh"]
|
||||
}
|
||||
```
|
||||
|
||||
Example of running a bash "inline" on linux:
|
||||
|
||||
```
|
||||
{
|
||||
"type": "shell-local",
|
||||
"environment_vars": ["PROVISIONERTEST=ProvisionerTest2"],
|
||||
"inline": ["echo hello",
|
||||
"echo $PROVISIONERTEST"]
|
||||
}
|
||||
```
|
||||
|
||||
|
|
Loading…
Reference in New Issue