Merge pull request #8185 from paulmey/azure-chroot

[azure] add Azure chroot builder
This commit is contained in:
Paul Meyer 2019-10-15 15:56:26 -07:00 committed by GitHub
commit ea7321c3ed
66 changed files with 7112 additions and 286 deletions

View File

@@ -168,6 +168,14 @@ type Config struct {
ctx interpolate.Context
}
func (c *Config) GetContext() interpolate.Context {
return c.ctx
}
type interpolateContextProvider interface {
GetContext() interpolate.Context
}
type wrappedCommandTemplate struct {
Command string
}
@@ -392,8 +400,12 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack
&StepPostMountCommands{
Commands: b.config.PostMountCommands,
},
&StepMountExtra{},
&StepCopyFiles{},
&StepMountExtra{
ChrootMounts: b.config.ChrootMounts,
},
&StepCopyFiles{
Files: b.config.CopyFiles,
},
&StepChrootProvision{},
&StepEarlyCleanup{},
&StepSnapshot{},

View File

@@ -17,20 +17,20 @@ import (
// copy_files_cleanup CleanupFunc - A function to clean up the copied files
// early.
type StepCopyFiles struct {
Files []string
files []string
}
func (s *StepCopyFiles) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
stderr := new(bytes.Buffer)
s.files = make([]string, 0, len(config.CopyFiles))
if len(config.CopyFiles) > 0 {
s.files = make([]string, 0, len(s.Files))
if len(s.Files) > 0 {
ui.Say("Copying files from host to chroot...")
for _, path := range config.CopyFiles {
for _, path := range s.Files {
ui.Message(path)
chrootPath := filepath.Join(mountPath, path)
log.Printf("Copying '%s' to '%s'", path, chrootPath)

View File

@@ -17,19 +17,19 @@ import (
// Produces:
// mount_extra_cleanup CleanupFunc - To perform early cleanup
type StepMountExtra struct {
mounts []string
ChrootMounts [][]string
mounts []string
}
func (s *StepMountExtra) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
s.mounts = make([]string, 0, len(config.ChrootMounts))
s.mounts = make([]string, 0, len(s.ChrootMounts))
ui.Say("Mounting additional paths within the chroot...")
for _, mountInfo := range config.ChrootMounts {
for _, mountInfo := range s.ChrootMounts {
innerPath := mountPath + mountInfo[2]
if err := os.MkdirAll(innerPath, 0755); err != nil {

View File

@@ -19,7 +19,7 @@ type StepPostMountCommands struct {
}
func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
config := state.Get("config").(interpolateContextProvider)
device := state.Get("device").(string)
mountPath := state.Get("mount_path").(string)
ui := state.Get("ui").(packer.Ui)
@@ -29,7 +29,7 @@ func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBa
return multistep.ActionContinue
}
ictx := config.ctx
ictx := config.GetContext()
ictx.Data = &postMountCommandsData{
Device: device,
MountPath: mountPath,

View File

@@ -17,7 +17,7 @@ type StepPreMountCommands struct {
}
func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
config := state.Get("config").(interpolateContextProvider)
device := state.Get("device").(string)
ui := state.Get("ui").(packer.Ui)
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
@@ -26,7 +26,7 @@ func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag
return multistep.ActionContinue
}
ictx := config.ctx
ictx := config.GetContext()
ictx.Data = &preMountCommandsData{Device: device}
ui.Say("Running device setup commands...")

View File

@@ -0,0 +1,452 @@
//go:generate struct-markdown
// Package chroot is able to create an Azure managed image without requiring the
// launch of a new virtual machine for every build. It does this by attaching and
// mounting the root disk and chrooting into that directory.
// It then creates a managed image from that attached disk.
package chroot
import (
"context"
"errors"
"fmt"
"log"
"runtime"
"strings"
"github.com/hashicorp/packer/builder/amazon/chroot"
azcommon "github.com/hashicorp/packer/builder/azure/common"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
)
// BuilderId is the unique ID for this builder
const BuilderId = "azure.chroot"
// Config is the configuration that is chained through the steps and settable
// from the template.
type Config struct {
common.PackerConfig `mapstructure:",squash"`
ClientConfig client.Config `mapstructure:",squash"`
// When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`.
FromScratch bool `mapstructure:"from_scratch"`
// Either a managed disk resource ID or a publisher:offer:sku:version specifier for platform image sources.
Source string `mapstructure:"source" required:"true"`
sourceType sourceType
// How to run shell commands. This may be useful to set environment variables or perhaps run
// a command with sudo, and so on. This is a configuration template where the `.Command` variable
// is replaced with the command to be run. Defaults to `{{.Command}}`.
CommandWrapper string `mapstructure:"command_wrapper"`
// A series of commands to execute after attaching the root volume and before mounting the chroot.
// This is not required unless using `from_scratch`. If so, this should include any partitioning
// and filesystem creation commands. The path to the device is provided by `{{.Device}}`.
PreMountCommands []string `mapstructure:"pre_mount_commands"`
// Options to supply the `mount` command when mounting devices. Each option will be prefixed with
// `-o` and supplied to the `mount` command run by Packer. Because this command is run in a shell,
// user discretion is advised. See the manual page for the `mount` command for valid file-system-specific options.
MountOptions []string `mapstructure:"mount_options"`
// The partition number containing the / partition. By default this is the first partition of the volume.
MountPartition string `mapstructure:"mount_partition"`
// The path where the volume will be mounted. This is where the chroot environment will be. This defaults
// to `/mnt/packer-azure-chroot-disks/{{.Device}}`. This is a configuration template where the `.Device`
// variable is replaced with the name of the device where the volume is attached.
MountPath string `mapstructure:"mount_path"`
// As `pre_mount_commands`, but the commands are executed after mounting the root device and before the
// extra mount and copy steps. The device and mount path are provided by `{{.Device}}` and `{{.MountPath}}`.
PostMountCommands []string `mapstructure:"post_mount_commands"`
// This is a list of devices to mount into the chroot environment. This configuration parameter requires
// some additional documentation which is in the "Chroot Mounts" section below. Please read that section
// for more information on how to use this.
ChrootMounts [][]string `mapstructure:"chroot_mounts"`
// Paths to files on the running Azure instance that will be copied into the chroot environment prior to
// provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass an empty list to skip copying
// `/etc/resolv.conf`. You may need to do this if you're building an image that uses systemd.
CopyFiles []string `mapstructure:"copy_files"`
// The name of the temporary disk that will be created in the resource group of the VM that Packer is
// running on. Will be generated if not set.
TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"`
// Try to resize the OS disk to this size on the first copy. Disks can only be enlarged. If not specified,
// the disk will keep its original size. Required when using `from_scratch`.
OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"`
// The [storage SKU](https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes)
// to use for the OS Disk. Defaults to `Standard_LRS`.
OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"`
// The [cache type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#cachingtypes)
// specified in the resulting image and for attaching it to the Packer VM. Defaults to `ReadOnly`.
OSDiskCacheType string `mapstructure:"os_disk_cache_type"`
// If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false`.
OSDiskSkipCleanup bool `mapstructure:"os_disk_skip_cleanup"`
// The image to create using this build.
ImageResourceID string `mapstructure:"image_resource_id" required:"true"`
// The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes).
// Defaults to `V1`.
ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"`
ctx interpolate.Context
}
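For illustration, here is a minimal configuration in the raw map form that `Builder.Prepare` accepts (the same form the builder tests below use); every value is a hypothetical placeholder:

	// A hypothetical minimal configuration for the azure-chroot builder.
	cfg := map[string]interface{}{
		"client_id":         "00000000-0000-0000-0000-000000000000",
		"client_secret":     "top-secret",
		"subscription_id":   "00000000-0000-0000-0000-000000000000",
		"source":            "credativ:Debian:9:latest", // publisher:offer:sku:version
		"image_resource_id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myrg/providers/Microsoft.Compute/images/myimage",
		"command_wrapper":   "sudo {{.Command}}",
	}
	b := &Builder{}
	warns, err := b.Prepare(cfg)
	// handle warns and err as usual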
type sourceType string
const (
sourcePlatformImage sourceType = "PlatformImage"
sourceDisk sourceType = "Disk"
)
// GetContext implements interpolateContextProvider to allow steps to use the
// config context for template interpolation
func (c *Config) GetContext() interpolate.Context {
return c.ctx
}
type Builder struct {
config Config
runner multistep.Runner
}
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.ctx.Funcs = azcommon.TemplateFuncs
b.config.ctx.Funcs["vm"] = CreateVMMetadataTemplateFunc()
err := config.Decode(&b.config, &config.DecodeOpts{
Interpolate: true,
InterpolateContext: &b.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
// these fields are interpolated in the steps,
// when more information is available
"command_wrapper",
"post_mount_commands",
"pre_mount_commands",
"mount_path",
},
},
}, raws...)
if err != nil {
return nil, err
}
var errs *packer.MultiError
var warns []string
// Defaults
err = b.config.ClientConfig.SetDefaultValues()
if err != nil {
return nil, err
}
if b.config.ChrootMounts == nil {
b.config.ChrootMounts = make([][]string, 0)
}
if len(b.config.ChrootMounts) == 0 {
b.config.ChrootMounts = [][]string{
{"proc", "proc", "/proc"},
{"sysfs", "sysfs", "/sys"},
{"bind", "/dev", "/dev"},
{"devpts", "devpts", "/dev/pts"},
{"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"},
}
}
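// Each chroot_mounts entry is a 3-element list: [filesystem type, source,
// destination inside the chroot]; e.g. a hypothetical extra bind mount
// would be ["bind", "/run", "/run"].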
// set a default file to copy if the user did not provide one
if b.config.CopyFiles == nil {
if !b.config.FromScratch {
b.config.CopyFiles = []string{"/etc/resolv.conf"}
}
}
if b.config.CommandWrapper == "" {
b.config.CommandWrapper = "{{.Command}}"
}
if b.config.MountPath == "" {
b.config.MountPath = "/mnt/packer-azure-chroot-disks/{{.Device}}"
}
if b.config.MountPartition == "" {
b.config.MountPartition = "1"
}
if b.config.TemporaryOSDiskName == "" {
if def, err := interpolate.Render("PackerTemp-{{timestamp}}", &b.config.ctx); err == nil {
b.config.TemporaryOSDiskName = def
} else {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("unable to render temporary disk name: %s", err))
}
}
if b.config.OSDiskStorageAccountType == "" {
b.config.OSDiskStorageAccountType = string(compute.PremiumLRS)
}
if b.config.OSDiskCacheType == "" {
b.config.OSDiskCacheType = string(compute.CachingTypesReadOnly)
}
if b.config.ImageHyperVGeneration == "" {
b.config.ImageHyperVGeneration = string(compute.V1)
}
// checks, accumulate any errors or warnings
if b.config.FromScratch {
if b.config.Source != "" {
errs = packer.MultiErrorAppend(
errs, errors.New("source cannot be specified when building from_scratch"))
}
if b.config.OSDiskSizeGB == 0 {
errs = packer.MultiErrorAppend(
errs, errors.New("os_disk_size_gb is required with from_scratch"))
}
if len(b.config.PreMountCommands) == 0 {
errs = packer.MultiErrorAppend(
errs, errors.New("pre_mount_commands is required with from_scratch"))
}
} else {
if _, err := client.ParsePlatformImageURN(b.config.Source); err == nil {
log.Println("Source is platform image:", b.config.Source)
b.config.sourceType = sourcePlatformImage
} else if id, err := azure.ParseResourceID(b.config.Source); err == nil &&
strings.EqualFold(id.Provider, "Microsoft.Compute") && strings.EqualFold(id.ResourceType, "disks") {
log.Println("Source is a disk resource ID:", b.config.Source)
b.config.sourceType = sourceDisk
} else {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("source: %q is not a valid platform image specifier, nor is it a disk resource ID", b.config.Source))
}
}
if err := checkDiskCacheType(b.config.OSDiskCacheType); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_cache_type: %v", err))
}
if err := checkStorageAccountType(b.config.OSDiskStorageAccountType); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_storage_account_type: %v", err))
}
if b.config.ImageResourceID == "" {
errs = packer.MultiErrorAppend(errs, errors.New("image_resource_id is required"))
} else {
r, err := azure.ParseResourceID(b.config.ImageResourceID)
if err != nil ||
!strings.EqualFold(r.Provider, "Microsoft.Compute") ||
!strings.EqualFold(r.ResourceType, "images") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf(
	"image_resource_id: %q is not a valid image resource id", b.config.ImageResourceID))
}
}
if err := checkHyperVGeneration(b.config.ImageHyperVGeneration); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_hyperv_generation: %v", err))
}
if errs != nil {
return warns, errs
}
packer.LogSecretFilter.Set(b.config.ClientConfig.ClientSecret, b.config.ClientConfig.ClientJWT)
return warns, nil
}
func checkDiskCacheType(s string) error {
	for _, v := range compute.PossibleCachingTypesValues() {
		if compute.CachingTypes(s) == v {
			return nil
		}
	}
	return fmt.Errorf("%q is not a valid value (valid values: %v)",
		s, compute.PossibleCachingTypesValues())
}
func checkStorageAccountType(s string) error {
	for _, v := range compute.PossibleDiskStorageAccountTypesValues() {
		if compute.DiskStorageAccountTypes(s) == v {
			return nil
		}
	}
	return fmt.Errorf("%q is not a valid value (valid values: %v)",
		s, compute.PossibleDiskStorageAccountTypesValues())
}
func checkHyperVGeneration(s string) error {
	for _, v := range compute.PossibleHyperVGenerationValues() {
		if compute.HyperVGeneration(s) == v {
			return nil
		}
	}
	return fmt.Errorf("%q is not a valid value (valid values: %v)",
		s, compute.PossibleHyperVGenerationValues())
}
func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
if runtime.GOOS != "linux" {
return nil, errors.New("the azure-chroot builder only works on Linux environments")
}
err := b.config.ClientConfig.FillParameters()
if err != nil {
return nil, fmt.Errorf("error setting Azure client defaults: %v", err)
}
azcli, err := client.New(b.config.ClientConfig, ui.Say)
if err != nil {
return nil, fmt.Errorf("error creating Azure client: %v", err)
}
wrappedCommand := func(command string) (string, error) {
ictx := b.config.ctx
ictx.Data = &struct{ Command string }{Command: command}
return interpolate.Render(b.config.CommandWrapper, &ictx)
}
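// For example (hypothetical value): with "command_wrapper" set to
// "sudo {{.Command}}", wrappedCommand("mount /dev/sda1 /mnt") renders
// "sudo mount /dev/sda1 /mnt".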
// Setup the state bag and initial state for the steps
state := new(multistep.BasicStateBag)
state.Put("config", &b.config)
state.Put("hook", hook)
state.Put("ui", ui)
state.Put("azureclient", azcli)
state.Put("wrappedCommand", chroot.CommandWrapper(wrappedCommand))
info, err := azcli.MetadataClient().GetComputeInfo()
if err != nil {
log.Printf("MetadataClient().GetComputeInfo(): error: %+v", err)
err := fmt.Errorf(
"Error retrieving information ARM resource ID and location" +
"of the VM that Packer is running on.\n" +
"Please verify that Packer is running on a proper Azure VM.")
ui.Error(err.Error())
return nil, err
}
state.Put("instance", info)
// Build the steps
var steps []multistep.Step
if b.config.FromScratch {
steps = append(steps,
&StepCreateNewDisk{
SubscriptionID: info.SubscriptionID,
ResourceGroup: info.ResourceGroupName,
DiskName: b.config.TemporaryOSDiskName,
DiskSizeGB: b.config.OSDiskSizeGB,
DiskStorageAccountType: b.config.OSDiskStorageAccountType,
HyperVGeneration: b.config.ImageHyperVGeneration,
Location: info.Location,
})
} else {
switch b.config.sourceType {
case sourcePlatformImage:
if pi, err := client.ParsePlatformImageURN(b.config.Source); err == nil {
if strings.EqualFold(pi.Version, "latest") {
vmi, err := azcli.VirtualMachineImagesClient().GetLatest(ctx, pi.Publisher, pi.Offer, pi.Sku, info.Location)
if err != nil {
return nil, fmt.Errorf("error retieving latest version of %q: %v", b.config.Source, err)
}
pi.Version = to.String(vmi.Name)
log.Println("Resolved latest version of source image:", pi.Version)
}
steps = append(steps,
&StepCreateNewDisk{
SubscriptionID: info.SubscriptionID,
ResourceGroup: info.ResourceGroupName,
DiskName: b.config.TemporaryOSDiskName,
DiskSizeGB: b.config.OSDiskSizeGB,
DiskStorageAccountType: b.config.OSDiskStorageAccountType,
HyperVGeneration: b.config.ImageHyperVGeneration,
Location: info.Location,
PlatformImage: pi,
SkipCleanup: b.config.OSDiskSkipCleanup,
})
} else {
panic("Unknown image source: " + b.config.Source)
}
case sourceDisk:
steps = append(steps,
&StepVerifySourceDisk{
SourceDiskResourceID: b.config.Source,
SubscriptionID: info.SubscriptionID,
Location: info.Location,
},
&StepCreateNewDisk{
SubscriptionID: info.SubscriptionID,
ResourceGroup: info.ResourceGroupName,
DiskName: b.config.TemporaryOSDiskName,
DiskSizeGB: b.config.OSDiskSizeGB,
DiskStorageAccountType: b.config.OSDiskStorageAccountType,
HyperVGeneration: b.config.ImageHyperVGeneration,
SourceDiskResourceID: b.config.Source,
Location: info.Location,
SkipCleanup: b.config.OSDiskSkipCleanup,
})
default:
panic(fmt.Errorf("Unknown source type: %+q", b.config.sourceType))
}
}
steps = append(steps,
&StepAttachDisk{}, // uses os_disk_resource_id and sets 'device' in stateBag
&chroot.StepPreMountCommands{
Commands: b.config.PreMountCommands,
},
&StepMountDevice{
MountOptions: b.config.MountOptions,
MountPartition: b.config.MountPartition,
MountPath: b.config.MountPath,
},
&chroot.StepPostMountCommands{
Commands: b.config.PostMountCommands,
},
&chroot.StepMountExtra{
ChrootMounts: b.config.ChrootMounts,
},
&chroot.StepCopyFiles{
Files: b.config.CopyFiles,
},
&chroot.StepChrootProvision{},
&chroot.StepEarlyCleanup{},
&StepCreateImage{
ImageResourceID: b.config.ImageResourceID,
ImageOSState: string(compute.Generalized),
OSDiskCacheType: b.config.OSDiskCacheType,
OSDiskStorageAccountType: b.config.OSDiskStorageAccountType,
Location: info.Location,
},
)
// Run!
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
b.runner.Run(ctx, state)
// If there was an error, return that
if rawErr, ok := state.GetOk("error"); ok {
return nil, rawErr.(error)
}
// Build the artifact and return it
artifact := &azcommon.Artifact{
Resources: []string{b.config.ImageResourceID},
BuilderIdValue: BuilderId,
}
return artifact, nil
}
var _ packer.Builder = &Builder{}

View File

@@ -0,0 +1,70 @@
package chroot
import (
"testing"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
)
func TestBuilder_Prepare(t *testing.T) {
type config map[string]interface{}
type regexMatchers map[string]string // map of regex : error message
tests := []struct {
name string
config config
validate func(Config)
wantErr bool
}{
{
name: "HappyPathFromPlatformImage",
config: config{
"client_id": "123",
"client_secret": "456",
"subscription_id": "789",
"image_resource_id": "/subscriptions/789/resourceGroups/otherrgname/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}",
"source": "credativ:Debian:9:latest",
},
validate: func(c Config) {
if c.OSDiskSizeGB != 0 {
t.Errorf("Expected OSDiskSizeGB to be 0, was %+v", c.OSDiskSizeGB)
}
if c.MountPartition != "1" {
t.Errorf("Expected MountPartition to be %s, but found %s", "1", c.MountPartition)
}
if c.OSDiskStorageAccountType != string(compute.PremiumLRS) {
t.Errorf("Expected OSDiskStorageAccountType to be %s, but found %s", string(compute.PremiumLRS), c.OSDiskStorageAccountType)
}
if c.OSDiskCacheType != string(compute.CachingTypesReadOnly) {
t.Errorf("Expected OSDiskCacheType to be %s, but found %s", string(compute.CachingTypesReadOnly), c.OSDiskCacheType)
}
if c.ImageHyperVGeneration != string(compute.V1) {
t.Errorf("Expected ImageHyperVGeneration to be %s, but found %s", string(compute.V1), c.ImageHyperVGeneration)
}
},
},
{
name: "HappyPathFromPlatformImage",
config: config{
"image_resource_id": "/subscriptions/789/resourceGroups/otherrgname/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}",
"source": "/subscriptions/789/resourceGroups/testrg/providers/Microsoft.Compute/disks/diskname",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &Builder{}
_, err := b.Prepare(tt.config)
if (err != nil) != tt.wantErr {
t.Errorf("Builder.Prepare() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.validate != nil {
tt.validate(b.config)
}
})
}
}

View File

@@ -0,0 +1,222 @@
package chroot
import (
"context"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
)
type DiskAttacher interface {
AttachDisk(ctx context.Context, disk string) (lun int32, err error)
DiskPathForLun(lun int32) string
WaitForDevice(ctx context.Context, lun int32) (device string, err error)
DetachDisk(ctx context.Context, disk string) (err error)
WaitForDetach(ctx context.Context, diskID string) error
}
var NewDiskAttacher = func(azureClient client.AzureClientSet) DiskAttacher {
return &diskAttacher{
azcli: azureClient,
}
}
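A minimal sketch of the lifecycle these methods imply (StepAttachDisk and StepCreateNewDisk below add error handling, timeouts, and UI output; `azcli`, `ctx`, and `diskID` are assumed to be in scope):

	da := NewDiskAttacher(azcli)
	lun, err := da.AttachDisk(ctx, diskID) // add disk to this VM's data disks
	if err != nil {
		return err
	}
	device, err := da.WaitForDevice(ctx, lun) // e.g. "/dev/sdc" (hypothetical)
	if err != nil {
		return err
	}
	_ = device // partition, mount and chroot into the device here
	if err := da.DetachDisk(ctx, diskID); err != nil {
		return err
	}
	return da.WaitForDetach(ctx, diskID)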
type diskAttacher struct {
azcli client.AzureClientSet
vm *client.ComputeInfo // store info about this VM so that we don't have to ask metadata service on every call
}
func (diskAttacher) DiskPathForLun(lun int32) string {
return fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", lun)
}
func (da diskAttacher) WaitForDevice(ctx context.Context, lun int32) (device string, err error) {
path := da.DiskPathForLun(lun)
for {
link, err := os.Readlink(path)
if err == nil {
return filepath.Abs("/dev/disk/azure/scsi1/" + link)
} else if err != os.ErrNotExist {
if pe, ok := err.(*os.PathError); ok && pe.Err != syscall.ENOENT {
return "", err
}
}
select {
case <-time.After(100 * time.Millisecond):
// continue
case <-ctx.Done():
return "", ctx.Err()
}
}
}
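// For illustration (hypothetical values): /dev/disk/azure/scsi1/lun3 is a
// udev-created symlink such as ../../../sdc, which the filepath.Abs call
// above resolves to /dev/sdc.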
func (da *diskAttacher) DetachDisk(ctx context.Context, diskID string) error {
log.Println("Fetching list of disks currently attached to VM")
currentDisks, err := da.getDisks(ctx)
if err != nil {
return err
}
log.Printf("Removing %q from list of disks currently attached to VM", diskID)
newDisks := []compute.DataDisk{}
for _, disk := range currentDisks {
if disk.ManagedDisk != nil &&
!strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
newDisks = append(newDisks, disk)
}
}
if len(currentDisks) == len(newDisks) {
return DiskNotFoundError
}
log.Println("Updating new list of disks attached to VM")
err = da.setDisks(ctx, newDisks)
if err != nil {
return err
}
return nil
}
func (da *diskAttacher) WaitForDetach(ctx context.Context, diskID string) error {
for { // loop until disk is not attached, timeout or error
list, err := da.getDisks(ctx)
if err != nil {
return err
}
if findDiskInList(list, diskID) == nil {
log.Println("Disk is no longer in VM model, assuming detached")
return nil
}
select {
case <-time.After(time.Second): //continue
case <-ctx.Done():
return ctx.Err()
}
}
}
var DiskNotFoundError = errors.New("Disk not found")
func (da *diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) {
dataDisks, err := da.getDisks(ctx)
if err != nil {
return -1, err
}
// check to see if disk is already attached, remember lun if found
if disk := findDiskInList(dataDisks, diskID); disk != nil {
// disk is already attached, just take this lun
if disk.Lun == nil {
return -1, errors.New("disk is attached, but lun was not set in VM model (possibly an error in the Azure APIs)")
}
return to.Int32(disk.Lun), nil
}
// disk was not found on VM, go and actually attach it
var lun int32 = -1
findFreeLun:
for lun = 0; lun < 64; lun++ {
for _, v := range dataDisks {
if to.Int32(v.Lun) == lun {
continue findFreeLun
}
}
// no datadisk is using this lun
break
}
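	// (The findFreeLun label makes the inner loop's continue advance the
	// outer scan to the next candidate LUN when the current one is taken.)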
// append new data disk to collection
dataDisks = append(dataDisks, compute.DataDisk{
CreateOption: compute.DiskCreateOptionTypesAttach,
ManagedDisk: &compute.ManagedDiskParameters{
ID: to.StringPtr(diskID),
},
Lun: to.Int32Ptr(lun),
})
// prepare resource object for update operation
err = da.setDisks(ctx, dataDisks)
if err != nil {
return -1, err
}
return lun, nil
}
func (da *diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) {
// getting resource info for this VM
if da.vm == nil {
vm, err := da.azcli.MetadataClient().GetComputeInfo()
if err != nil {
return compute.VirtualMachine{}, err
}
da.vm = vm
}
// retrieve actual VM
vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, da.vm.ResourceGroupName, da.vm.Name, "")
if err != nil {
return compute.VirtualMachine{}, err
}
if vmResource.StorageProfile == nil {
return compute.VirtualMachine{}, errors.New("properties.storageProfile is not set on VM, this is unexpected")
}
return vmResource, nil
}
func (da diskAttacher) getDisks(ctx context.Context) ([]compute.DataDisk, error) {
vmResource, err := da.getThisVM(ctx)
if err != nil {
return []compute.DataDisk{}, err
}
return *vmResource.StorageProfile.DataDisks, nil
}
func (da diskAttacher) setDisks(ctx context.Context, disks []compute.DataDisk) error {
vmResource, err := da.getThisVM(ctx)
if err != nil {
return err
}
id, err := azure.ParseResourceID(to.String(vmResource.ID))
if err != nil {
return err
}
vmResource.StorageProfile.DataDisks = &disks
vmResource.Resources = nil
// update the VM resource, attach disk
_, err = da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource)
return err
}
func findDiskInList(list []compute.DataDisk, diskID string) *compute.DataDisk {
for _, disk := range list {
if disk.ManagedDisk != nil &&
strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
return &disk
}
}
return nil
}

View File

@@ -0,0 +1,86 @@
package chroot
import (
"context"
"github.com/Azure/go-autorest/autorest/to"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testvm = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup/providers/Microsoft.Compute/virtualMachines/testVM"
testdisk = "/subscriptions/00000000-0000-0000-0000-000000000001/resourceGroups/testGroup2/providers/Microsoft.Compute/disks/testDisk"
)
// Tests assume the current machine is capable of running the chroot builder (i.e. that it is an Azure VM)
func Test_DiskAttacherAttachesDiskToVM(t *testing.T) {
azcli, err := client.GetTestClientSet(t) // integration test
require.Nil(t, err)
da := NewDiskAttacher(azcli)
testDiskName := t.Name()
vm, err := azcli.MetadataClient().GetComputeInfo()
require.Nil(t, err, "Test needs to run on an Azure VM, unable to retrieve VM information")
t.Log("Creating new disk '", testDiskName, "' in ", vm.ResourceGroupName)
disk, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName)
if err == nil {
t.Log("Disk already exists")
if disk.DiskState == compute.Attached {
t.Log("Disk is attached, assuming to this machine, trying to detach")
err = da.DetachDisk(context.TODO(), to.String(disk.ID))
require.Nil(t, err)
}
t.Log("Deleting disk")
result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName)
require.Nil(t, err)
err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient())
require.Nil(t, err)
}
t.Log("Creating disk")
r, err := azcli.DisksClient().CreateOrUpdate(context.TODO(), vm.ResourceGroupName, testDiskName, compute.Disk{
Location: to.StringPtr(vm.Location),
Sku: &compute.DiskSku{
Name: compute.StandardLRS,
},
DiskProperties: &compute.DiskProperties{
DiskSizeGB: to.Int32Ptr(30),
CreationData: &compute.CreationData{CreateOption: compute.Empty},
},
})
require.Nil(t, err)
err = r.WaitForCompletionRef(context.TODO(), azcli.PollClient())
require.Nil(t, err)
t.Log("Retrieving disk properties")
d, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName)
require.Nil(t, err)
assert.NotNil(t, d)
t.Log("Attaching disk")
lun, err := da.AttachDisk(context.TODO(), to.String(d.ID))
assert.Nil(t, err)
t.Log("Waiting for device")
dev, err := da.WaitForDevice(context.TODO(), lun)
assert.Nil(t, err)
t.Log("Device path:", dev)
t.Log("Detaching disk")
err = da.DetachDisk(context.TODO(), to.String(d.ID))
require.Nil(t, err)
t.Log("Deleting disk")
result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName)
if err == nil {
err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient())
}
require.Nil(t, err)
}

View File

@@ -0,0 +1,83 @@
package chroot
import (
"context"
"fmt"
"log"
"time"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
var _ multistep.Step = &StepAttachDisk{}
type StepAttachDisk struct {
attached bool
}
func (s *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
ui.Say(fmt.Sprintf("Attaching disk '%s'", diskResourceID))
da := NewDiskAttacher(azcli)
lun, err := da.AttachDisk(ctx, diskResourceID)
if err != nil {
log.Printf("StepAttachDisk.Run: error: %+v", err)
err := fmt.Errorf(
"error attaching disk '%s': %v", diskResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
ui.Say("Disk attached, waiting for device to show up")
ctx, cancel := context.WithTimeout(ctx, time.Minute*3) // time out in case the device is not configured correctly
defer cancel()
device, err := da.WaitForDevice(ctx, lun)
if err != nil {
log.Printf("StepAttachDisk.Run: error: %+v", err)
err := fmt.Errorf(
"error attaching disk '%s': %v", diskResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
ui.Say(fmt.Sprintf("Disk available at %q", device))
s.attached = true
state.Put("device", device)
state.Put("attach_cleanup", s)
return multistep.ActionContinue
}
func (s *StepAttachDisk) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
if err := s.CleanupFunc(state); err != nil {
ui.Error(err.Error())
}
}
func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error {
if s.attached {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
ui.Say(fmt.Sprintf("Detaching disk '%s'", diskResourceID))
da := NewDiskAttacher(azcli)
err := da.DetachDisk(context.Background(), diskResourceID)
if err != nil {
return fmt.Errorf("error detaching %q: %v", diskResourceID, err)
}
s.attached = false
}
return nil
}

View File

@@ -0,0 +1,131 @@
package chroot
import (
"context"
"errors"
"io/ioutil"
"net/http"
"reflect"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
func TestStepAttachDisk_Run(t *testing.T) {
type fields struct {
GetDiskResponseCode int
GetDiskResponseBody string
attachError error
waitForDeviceError error
}
tests := []struct {
name string
fields fields
want multistep.StepAction
}{
{
name: "HappyPath",
want: multistep.ActionContinue,
},
{
name: "AttachError",
fields: fields{
attachError: errors.New("unit test"),
},
want: multistep.ActionHalt,
},
{
name: "WaitForDeviceError",
fields: fields{
waitForDeviceError: errors.New("unit test"),
},
want: multistep.ActionHalt,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &StepAttachDisk{}
NewDiskAttacher = func(azcli client.AzureClientSet) DiskAttacher {
return &fakeDiskAttacher{
attachError: tt.fields.attachError,
waitForDeviceError: tt.fields.waitForDeviceError,
}
}
dm := compute.NewDisksClient("subscriptionId")
dm.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
return &http.Response{
Request: r,
Body: ioutil.NopCloser(strings.NewReader(tt.fields.GetDiskResponseBody)),
StatusCode: tt.fields.GetDiskResponseCode,
}, nil
})
errorBuffer := &strings.Builder{}
ui := &packer.BasicUi{
Reader: strings.NewReader(""),
Writer: ioutil.Discard,
ErrorWriter: errorBuffer,
}
state := new(multistep.BasicStateBag)
state.Put("azureclient", &client.AzureClientSetMock{})
state.Put("ui", ui)
state.Put("os_disk_resource_id", "/subscriptions/12345/resourceGroups/group1/providers/Microsoft.Compute/disks/disk1")
got := s.Run(context.TODO(), state)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("StepAttachDisk.Run() = %v, want %v", got, tt.want)
}
if got == multistep.ActionHalt {
if _, ok := state.GetOk("error"); !ok {
t.Fatal("Expected 'error' to be set in statebag after failure")
}
}
})
}
}
type fakeDiskAttacher struct {
attachError error
waitForDeviceError error
}
var _ DiskAttacher = &fakeDiskAttacher{}
func (da *fakeDiskAttacher) AttachDisk(ctx context.Context, disk string) (lun int32, err error) {
if da.attachError != nil {
return 0, da.attachError
}
return 3, nil
}
func (da *fakeDiskAttacher) DiskPathForLun(lun int32) string {
panic("not implemented")
}
func (da *fakeDiskAttacher) WaitForDevice(ctx context.Context, lun int32) (device string, err error) {
if da.waitForDeviceError != nil {
return "", da.waitForDeviceError
}
if lun == 3 {
return "/dev/sdq", nil
}
panic("expected lun==3")
}
func (da *fakeDiskAttacher) DetachDisk(ctx context.Context, disk string) (err error) {
panic("not implemented")
}
func (da *fakeDiskAttacher) WaitForDetach(ctx context.Context, diskID string) error {
panic("not implemented")
}

View File

@@ -0,0 +1,89 @@
package chroot
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"log"
)
var _ multistep.Step = &StepCreateImage{}
type StepCreateImage struct {
ImageResourceID string
ImageOSState string
OSDiskStorageAccountType string
OSDiskCacheType string
Location string
imageResource azure.Resource
}
func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
ui.Say(fmt.Sprintf("Creating image %s\n using %s for os disk.",
s.ImageResourceID,
diskResourceID))
var err error
s.imageResource, err = azure.ParseResourceID(s.ImageResourceID)
if err != nil {
log.Printf("StepCreateImage.Run: error: %+v", err)
err := fmt.Errorf(
"error parsing image resource id '%s': %v", s.ImageResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
image := compute.Image{
Location: to.StringPtr(s.Location),
ImageProperties: &compute.ImageProperties{
StorageProfile: &compute.ImageStorageProfile{
OsDisk: &compute.ImageOSDisk{
OsState: compute.OperatingSystemStateTypes(s.ImageOSState),
OsType: compute.Linux,
ManagedDisk: &compute.SubResource{
ID: &diskResourceID,
},
StorageAccountType: compute.StorageAccountTypes(s.OSDiskStorageAccountType),
Caching: compute.CachingTypes(s.OSDiskCacheType),
},
// DataDisks: nil,
// ZoneResilient: nil,
},
},
// Tags: nil,
}
f, err := azcli.ImagesClient().CreateOrUpdate(
ctx,
s.imageResource.ResourceGroup,
s.imageResource.ResourceName,
image)
if err == nil {
log.Println("Image creation in process...")
err = f.WaitForCompletionRef(ctx, azcli.PollClient())
}
if err != nil {
log.Printf("StepCreateImage.Run: error: %+v", err)
err := fmt.Errorf(
"error creating image '%s': %v", s.ImageResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
log.Printf("Image creation complete: %s", f.Status())
return multistep.ActionContinue
}
func (*StepCreateImage) Cleanup(bag multistep.StateBag) {} // this is the final artifact, don't delete

View File

@@ -0,0 +1,110 @@
package chroot
import (
"context"
"fmt"
"log"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
var _ multistep.Step = &StepCreateNewDisk{}
type StepCreateNewDisk struct {
SubscriptionID, ResourceGroup, DiskName string
DiskSizeGB int32 // optional, ignored if 0
DiskStorageAccountType string // from compute.DiskStorageAccountTypes
HyperVGeneration string
Location string
PlatformImage *client.PlatformImage
SourceDiskResourceID string
SkipCleanup bool
}
func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
s.SubscriptionID,
s.ResourceGroup,
s.DiskName)
state.Put("os_disk_resource_id", diskResourceID)
ui.Say(fmt.Sprintf("Creating disk '%s'", diskResourceID))
disk := compute.Disk{
Location: to.StringPtr(s.Location),
Sku: &compute.DiskSku{
Name: compute.DiskStorageAccountTypes(s.DiskStorageAccountType),
},
//Zones: nil,
DiskProperties: &compute.DiskProperties{
OsType: "Linux",
HyperVGeneration: compute.HyperVGeneration(s.HyperVGeneration),
CreationData: &compute.CreationData{},
},
//Tags: map[string]*string{
}
if s.DiskSizeGB > 0 {
disk.DiskProperties.DiskSizeGB = to.Int32Ptr(s.DiskSizeGB)
}
if s.SourceDiskResourceID != "" {
disk.CreationData.CreateOption = compute.Copy
disk.CreationData.SourceResourceID = to.StringPtr(s.SourceDiskResourceID)
} else if s.PlatformImage == nil {
disk.CreationData.CreateOption = compute.Empty
} else {
disk.CreationData.CreateOption = compute.FromImage
disk.CreationData.ImageReference = &compute.ImageDiskReference{
ID: to.StringPtr(fmt.Sprintf(
"/subscriptions/%s/providers/Microsoft.Compute/locations/%s/publishers/%s/artifacttypes/vmimage/offers/%s/skus/%s/versions/%s",
s.SubscriptionID, s.Location, s.PlatformImage.Publisher, s.PlatformImage.Offer, s.PlatformImage.Sku, s.PlatformImage.Version)),
}
}
f, err := azcli.DisksClient().CreateOrUpdate(ctx, s.ResourceGroup, s.DiskName, disk)
if err == nil {
err = f.WaitForCompletionRef(ctx, azcli.PollClient())
}
if err != nil {
log.Printf("StepCreateNewDisk.Run: error: %+v", err)
err := fmt.Errorf(
"error creating new disk '%s': %v", diskResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) {
if !s.SkipCleanup {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
ui.Say(fmt.Sprintf("Waiting for disk %q detach to complete", diskResourceID))
err := NewDiskAttacher(azcli).WaitForDetach(context.Background(), diskResourceID)
if err != nil {
	ui.Error(fmt.Sprintf("error waiting for detach of %q: %v", diskResourceID, err))
}
ui.Say(fmt.Sprintf("Deleting disk %q", diskResourceID))
f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName)
if err == nil {
err = f.WaitForCompletionRef(context.TODO(), azcli.PollClient())
}
if err != nil {
log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err)
ui.Error(fmt.Sprintf("error deleting new disk '%s': %v.", diskResourceID, err))
}
}
}

View File

@@ -0,0 +1,147 @@
package chroot
import (
"context"
"io/ioutil"
"net/http"
"reflect"
"regexp"
"testing"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
func TestStepCreateNewDisk_Run(t *testing.T) {
type fields struct {
SubscriptionID string
ResourceGroup string
DiskName string
DiskSizeGB int32
DiskStorageAccountType string
HyperVGeneration string
Location string
PlatformImage *client.PlatformImage
SourceDiskResourceID string
expectedPutDiskBody string
}
tests := []struct {
name string
fields fields
want multistep.StepAction
}{
{
name: "HappyPathDiskSource",
fields: fields{
SubscriptionID: "SubscriptionID",
ResourceGroup: "ResourceGroupName",
DiskName: "TemporaryOSDiskName",
DiskSizeGB: 42,
DiskStorageAccountType: string(compute.PremiumLRS),
HyperVGeneration: string(compute.V1),
Location: "westus",
SourceDiskResourceID: "SourceDisk",
expectedPutDiskBody: `
{
"location": "westus",
"properties": {
"osType": "Linux",
"hyperVGeneration": "V1",
"creationData": {
"createOption": "Copy",
"sourceResourceId": "SourceDisk"
},
"diskSizeGB": 42
},
"sku": {
"name": "Premium_LRS"
}
}`,
},
want: multistep.ActionContinue,
},
{
name: "HappyPathDiskSource",
fields: fields{
SubscriptionID: "SubscriptionID",
ResourceGroup: "ResourceGroupName",
DiskName: "TemporaryOSDiskName",
DiskStorageAccountType: string(compute.StandardLRS),
HyperVGeneration: string(compute.V1),
Location: "westus",
PlatformImage: &client.PlatformImage{
Publisher: "Microsoft",
Offer: "Windows",
Sku: "2016-DataCenter",
Version: "2016.1.4",
},
expectedPutDiskBody: `
{
"location": "westus",
"properties": {
"osType": "Linux",
"hyperVGeneration": "V1",
"creationData": {
"createOption":"FromImage",
"imageReference": {
"id":"/subscriptions/SubscriptionID/providers/Microsoft.Compute/locations/westus/publishers/Microsoft/artifacttypes/vmimage/offers/Windows/skus/2016-DataCenter/versions/2016.1.4"
}
}
},
"sku": {
"name": "Standard_LRS"
}
}`,
},
want: multistep.ActionContinue,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := StepCreateNewDisk{
SubscriptionID: tt.fields.SubscriptionID,
ResourceGroup: tt.fields.ResourceGroup,
DiskName: tt.fields.DiskName,
DiskSizeGB: tt.fields.DiskSizeGB,
DiskStorageAccountType: tt.fields.DiskStorageAccountType,
HyperVGeneration: tt.fields.HyperVGeneration,
Location: tt.fields.Location,
PlatformImage: tt.fields.PlatformImage,
SourceDiskResourceID: tt.fields.SourceDiskResourceID,
}
expectedPutDiskBody := regexp.MustCompile(`[\s\n]`).ReplaceAllString(tt.fields.expectedPutDiskBody, "")
m := compute.NewDisksClient("subscriptionId")
m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
if r.Method != "PUT" {
t.Fatal("Expected only a PUT disk call")
}
b, _ := ioutil.ReadAll(r.Body)
if string(b) != expectedPutDiskBody {
t.Fatalf("expected body to be %q, but got %q", expectedPutDiskBody, string(b))
}
return &http.Response{
Request: r,
StatusCode: 200,
}, nil
})
state := new(multistep.BasicStateBag)
state.Put("azureclient", &client.AzureClientSetMock{
DisksClientMock: m,
})
state.Put("ui", packer.TestUi(t))
if got := s.Run(context.TODO(), state); !reflect.DeepEqual(got, tt.want) {
t.Errorf("StepCreateNewDisk.Run() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -0,0 +1,132 @@
package chroot
// mostly borrowed from ./builder/amazon/chroot/step_mount_device.go
import (
"bytes"
"context"
"fmt"
"github.com/hashicorp/packer/builder/amazon/chroot"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"log"
"os"
"path/filepath"
"strings"
)
var _ multistep.Step = &StepMountDevice{}
type StepMountDevice struct {
MountOptions []string
MountPartition string
MountPath string
mountPath string
}
func (s *StepMountDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
device := state.Get("device").(string)
config := state.Get("config").(*Config)
wrappedCommand := state.Get("wrappedCommand").(chroot.CommandWrapper)
ictx := config.ctx
ictx.Data = &struct{ Device string }{Device: filepath.Base(device)}
mountPath, err := interpolate.Render(s.MountPath, &ictx)
if err != nil {
err := fmt.Errorf("error preparing mount directory: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
mountPath, err = filepath.Abs(mountPath)
if err != nil {
err := fmt.Errorf("error preparing mount directory: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
log.Printf("Mount path: %s", mountPath)
if err := os.MkdirAll(mountPath, 0755); err != nil {
err := fmt.Errorf("error creating mount directory: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
deviceMount := fmt.Sprintf("%s%s", device, s.MountPartition)
state.Put("deviceMount", deviceMount)
ui.Say("Mounting the root device...")
stderr := new(bytes.Buffer)
// build mount options from the mount_options config; useful for nouuid or
// other device-type-specific settings for mount
opts := ""
if len(s.MountOptions) > 0 {
opts = "-o " + strings.Join(s.MountOptions, " -o ")
}
mountCommand, err := wrappedCommand(
fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath))
if err != nil {
err := fmt.Errorf("error creating mount command: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
cmd := chroot.ShellCommand(mountCommand)
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
err := fmt.Errorf(
"error mounting root volume: %s\nStderr: %s", err, stderr.String())
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Set the mount path so we remember to unmount it later
s.mountPath = mountPath
state.Put("mount_path", s.mountPath)
state.Put("mount_device_cleanup", s)
return multistep.ActionContinue
}
func (s *StepMountDevice) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
if err := s.CleanupFunc(state); err != nil {
ui.Error(err.Error())
}
}
func (s *StepMountDevice) CleanupFunc(state multistep.StateBag) error {
if s.mountPath == "" {
return nil
}
ui := state.Get("ui").(packer.Ui)
wrappedCommand := state.Get("wrappedCommand").(chroot.CommandWrapper)
ui.Say("Unmounting the root device...")
unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", s.mountPath))
if err != nil {
return fmt.Errorf("error creating unmount command: %s", err)
}
cmd := chroot.ShellCommand(unmountCommand)
if err := cmd.Run(); err != nil {
return fmt.Errorf("error unmounting root device: %s", err)
}
s.mountPath = ""
return nil
}

View File

@@ -0,0 +1,81 @@
package chroot
import (
"context"
"fmt"
"log"
"strings"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
type StepVerifySourceDisk struct {
SubscriptionID string
SourceDiskResourceID string
Location string
}
func (s StepVerifySourceDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
ui.Say("Checking source disk location")
resource, err := azure.ParseResourceID(s.SourceDiskResourceID)
if err != nil {
log.Printf("StepVerifySourceDisk.Run: error: %+v", err)
err := fmt.Errorf("Could not parse resource id %q: %s", s.SourceDiskResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
if !strings.EqualFold(resource.SubscriptionID, s.SubscriptionID) {
err := fmt.Errorf("Source disk resource %q is in a different subscription than this VM (%q). "+
"Packer does not know how to handle that.",
s.SourceDiskResourceID, s.SubscriptionID)
log.Printf("StepVerifySourceDisk.Run: error: %+v", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
if !(strings.EqualFold(resource.Provider, "Microsoft.Compute") && strings.EqualFold(resource.ResourceType, "disks")) {
err := fmt.Errorf("Resource ID %q is not a managed disk resource", s.SourceDiskResourceID)
log.Printf("StepVerifySourceDisk.Run: error: %+v", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
disk, err := azcli.DisksClient().Get(ctx,
resource.ResourceGroup, resource.ResourceName)
if err != nil {
err := fmt.Errorf("Unable to retrieve disk (%q): %s", s.SourceDiskResourceID, err)
log.Printf("StepVerifySourceDisk.Run: error: %+v", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
location := to.String(disk.Location)
if !strings.EqualFold(location, s.Location) {
err := fmt.Errorf("Source disk resource %q is in a different location (%q) than this VM (%q). "+
"Packer does not know how to handle that.",
s.SourceDiskResourceID,
location,
s.Location)
log.Printf("StepVerifySourceDisk.Run: error: %+v", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
func (s StepVerifySourceDisk) Cleanup(state multistep.StateBag) {}

View File

@@ -0,0 +1,168 @@
package chroot
import (
"context"
"io/ioutil"
"net/http"
"reflect"
"regexp"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
)
func Test_StepVerifySourceDisk_Run(t *testing.T) {
type fields struct {
SubscriptionID string
SourceDiskResourceID string
Location string
GetDiskResponseCode int
GetDiskResponseBody string
}
type args struct {
state multistep.StateBag
}
tests := []struct {
name string
fields fields
args args
want multistep.StepAction
errormatch string
}{
{
name: "HappyPath",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1",
Location: "westus2",
GetDiskResponseCode: 200,
GetDiskResponseBody: `{"location":"westus2"}`,
},
want: multistep.ActionContinue,
},
{
name: "NotAResourceID",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/other",
Location: "westus2",
GetDiskResponseCode: 200,
GetDiskResponseBody: `{"location":"westus2"}`,
},
want: multistep.ActionHalt,
errormatch: "Could not parse resource id",
},
{
name: "DiskNotFound",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1",
Location: "westus2",
GetDiskResponseCode: 404,
GetDiskResponseBody: `{}`,
},
want: multistep.ActionHalt,
errormatch: "Unable to retrieve",
},
{
name: "NotADisk",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/images/image1",
Location: "westus2",
GetDiskResponseCode: 404,
},
want: multistep.ActionHalt,
errormatch: "not a managed disk",
},
{
name: "OtherSubscription",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/subscriptions/subid2/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1",
Location: "westus2",
GetDiskResponseCode: 200,
GetDiskResponseBody: `{"location":"westus2"}`,
},
want: multistep.ActionHalt,
errormatch: "different subscription",
},
{
name: "OtherLocation",
fields: fields{
SubscriptionID: "subid1",
SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1",
Location: "eastus",
GetDiskResponseCode: 200,
GetDiskResponseBody: `{"location":"westus2"}`,
},
want: multistep.ActionHalt,
errormatch: "different location",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := StepVerifySourceDisk{
SubscriptionID: tt.fields.SubscriptionID,
SourceDiskResourceID: tt.fields.SourceDiskResourceID,
Location: tt.fields.Location,
}
m := compute.NewDisksClient("subscriptionId")
m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
return &http.Response{
Request: r,
Body: ioutil.NopCloser(strings.NewReader(tt.fields.GetDiskResponseBody)),
StatusCode: tt.fields.GetDiskResponseCode,
}, nil
})
errorBuffer := &strings.Builder{}
ui := &packer.BasicUi{
Reader: strings.NewReader(""),
Writer: ioutil.Discard,
ErrorWriter: errorBuffer,
}
state := new(multistep.BasicStateBag)
state.Put("azureclient", &client.AzureClientSetMock{
DisksClientMock: m,
})
state.Put("ui", ui)
got := s.Run(context.TODO(), state)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("StepVerifySourceDisk.Run() = %v, want %v", got, tt.want)
}
if tt.errormatch != "" {
if !regexp.MustCompile(tt.errormatch).MatchString(errorBuffer.String()) {
t.Errorf("Expected the error output (%q) to match %q", errorBuffer.String(), tt.errormatch)
}
}
if got == multistep.ActionHalt {
if _, ok := state.GetOk("error"); !ok {
t.Fatal("Expected 'error' to be set in statebag after failure")
}
}
})
}
}
type uiThatRemembersErrors struct {
packer.Ui
LastError string
}

View File

@@ -0,0 +1,37 @@
package chroot
import (
"fmt"
"sync"
"github.com/hashicorp/packer/builder/azure/common/client"
)
// CreateVMMetadataTemplateFunc returns a template function that retrieves VM metadata.
// The metadata is retrieved only once and reused for all executions of the function.
func CreateVMMetadataTemplateFunc() func(string) (string, error) {
var data *client.ComputeInfo
var dataErr error
once := sync.Once{}
return func(key string) (string, error) {
once.Do(func() {
data, dataErr = client.DefaultMetadataClient.GetComputeInfo()
})
if dataErr != nil {
return "", dataErr
}
switch key {
case "name":
return data.Name, nil
case "subscription_id":
return data.SubscriptionID, nil
case "resource_group":
return data.ResourceGroupName, nil
case "location":
return data.Location, nil
case "resource_id":
return data.ResourceID(), nil
default:
return "", fmt.Errorf("unknown metadata key: %s (supported: name, subscription_id, resource_group, location, resource_id)", key)
}
}
}
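// Sketch of use: Builder.Prepare (above) registers this function as "vm" in
// the interpolation context, so a template value like the hypothetical
//   "image_resource_id": "/subscriptions/{{ vm `subscription_id` }}/resourceGroups/{{ vm `resource_group` }}/providers/Microsoft.Compute/images/myimage"
// resolves against the metadata of the VM that Packer runs on.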

View File

@@ -0,0 +1,103 @@
package common
import (
"context"
"fmt"
"log"
"sort"
"strings"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/packer"
)
// Artifact is an artifact implementation that contains built Managed Images or Disks.
type Artifact struct {
// Array of the Azure resource IDs that were created.
Resources []string
// BuilderIdValue is the unique ID of the builder that created this artifact
BuilderIdValue string
// Azure client set for making API calls.
AzureClientSet client.AzureClientSet
}
func (a *Artifact) BuilderId() string {
return a.BuilderIdValue
}
func (*Artifact) Files() []string {
// We have no files
return nil
}
func (a *Artifact) Id() string {
parts := make([]string, 0, len(a.Resources))
for _, resource := range a.Resources {
parts = append(parts, strings.ToLower(resource))
}
sort.Strings(parts)
return strings.Join(parts, ",")
}
func (a *Artifact) String() string {
parts := make([]string, 0, len(a.Resources))
for _, resource := range a.Resources {
parts = append(parts, strings.ToLower(resource))
}
sort.Strings(parts)
return fmt.Sprintf("Azure resources created:\n%s\n", strings.Join(parts, "\n"))
}
func (a *Artifact) State(name string) interface{} {
switch name {
default:
return nil
}
}
func (a *Artifact) Destroy() error {
errs := make([]error, 0)
for _, resource := range a.Resources {
log.Printf("Deleting resource %s", resource)
id, err := azure.ParseResourceID(resource)
if err != nil {
return fmt.Errorf("Unable to parse resource id (%s): %v", resource, err)
}
ctx := context.TODO()
restype := strings.ToLower(fmt.Sprintf("%s/%s", id.Provider, id.ResourceType))
switch restype {
case "microsoft.compute/images":
res, err := a.AzureClientSet.ImagesClient().Delete(ctx, id.ResourceGroup, id.ResourceName)
if err != nil {
errs = append(errs, fmt.Errorf("Unable to initiate deletion of resource (%s): %v", resource, err))
} else {
err := res.WaitForCompletionRef(ctx, a.AzureClientSet.PollClient())
if err != nil {
errs = append(errs, fmt.Errorf("Unable to complete deletion of resource (%s): %v", resource, err))
}
}
default:
errs = append(errs, fmt.Errorf("Don't know how to delete resources of type %s (%s)", resource, restype))
}
}
if len(errs) > 0 {
if len(errs) == 1 {
return errs[0]
} else {
return &packer.MultiError{Errors: errs}
}
}
return nil
}
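
Id and String both lower-case and sort the resource IDs, so the artifact identifier is stable regardless of creation order. A small sketch with a made-up resource ID:

package main

import (
	"fmt"

	azurecommon "github.com/hashicorp/packer/builder/azure/common"
)

func main() {
	a := &azurecommon.Artifact{
		Resources: []string{
			"/subscriptions/1234/resourceGroups/rg/providers/Microsoft.Compute/images/MyImage",
		},
		BuilderIdValue: "example.builder", // placeholder builder ID
	}
	fmt.Println(a.Id())     // normalized resource IDs joined by ","
	fmt.Println(a.String()) // human-readable listing of created resources
}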

View File

@ -0,0 +1,90 @@
package client
import (
"net/http"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi"
"github.com/Azure/go-autorest/autorest"
)
type AzureClientSet interface {
MetadataClient() MetadataClientAPI
DisksClient() computeapi.DisksClientAPI
ImagesClient() computeapi.ImagesClientAPI
VirtualMachinesClient() computeapi.VirtualMachinesClientAPI
VirtualMachineImagesClient() VirtualMachineImagesClientAPI
PollClient() autorest.Client
}
var subscriptionPathRegex = regexp.MustCompile(`/subscriptions/([[:xdigit:]]{8}(-[[:xdigit:]]{4}){3}-[[:xdigit:]]{12})`)
var _ AzureClientSet = &azureClientSet{}
type azureClientSet struct {
sender autorest.Sender
authorizer autorest.Authorizer
subscriptionID string
PollingDelay time.Duration
}
func New(c Config, say func(string)) (AzureClientSet, error) {
token, err := c.GetServicePrincipalToken(say, c.CloudEnvironment.ResourceManagerEndpoint)
if err != nil {
return nil, err
}
return &azureClientSet{
authorizer: autorest.NewBearerAuthorizer(token),
subscriptionID: c.SubscriptionID,
sender: http.DefaultClient,
PollingDelay: time.Second,
}, nil
}
func (s azureClientSet) configureAutorestClient(c *autorest.Client) {
c.Authorizer = s.authorizer
c.Sender = s.sender
}
func (s azureClientSet) MetadataClient() MetadataClientAPI {
return metadataClient{s.sender}
}
func (s azureClientSet) DisksClient() computeapi.DisksClientAPI {
c := compute.NewDisksClient(s.subscriptionID)
s.configureAutorestClient(&c.Client)
c.PollingDelay = s.PollingDelay
return c
}
func (s azureClientSet) ImagesClient() computeapi.ImagesClientAPI {
c := compute.NewImagesClient(s.subscriptionID)
s.configureAutorestClient(&c.Client)
c.PollingDelay = s.PollingDelay
return c
}
func (s azureClientSet) VirtualMachinesClient() computeapi.VirtualMachinesClientAPI {
c := compute.NewVirtualMachinesClient(s.subscriptionID)
s.configureAutorestClient(&c.Client)
c.PollingDelay = s.PollingDelay
return c
}
func (s azureClientSet) VirtualMachineImagesClient() VirtualMachineImagesClientAPI {
c := compute.NewVirtualMachineImagesClient(s.subscriptionID)
s.configureAutorestClient(&c.Client)
c.PollingDelay = s.PollingDelay
return virtualMachineImagesClientAPI{c}
}
func (s azureClientSet) PollClient() autorest.Client {
c := autorest.NewClientWithUserAgent("Packer-Azure-ClientSet")
s.configureAutorestClient(&c)
c.PollingDelay = time.Second / 3
return c
}
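
Every typed client shares the same authorizer, sender, and polling delay through configureAutorestClient. A hedged construction sketch, assuming cfg has been populated with valid credentials (for example via FillParameters) and that a resource group named "my-rg" exists:

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/packer/builder/azure/common/client"
)

func main() {
	var cfg client.Config // assumed to be filled in from user configuration
	cs, err := client.New(cfg, func(s string) { fmt.Println(s) })
	if err != nil {
		fmt.Println("client set creation failed:", err)
		return
	}
	// List managed disks in a hypothetical resource group.
	page, err := cs.DisksClient().ListByResourceGroup(context.TODO(), "my-rg")
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	for _, d := range page.Values() {
		fmt.Println(*d.Name)
	}
}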

View File

@ -0,0 +1,46 @@
package client
import (
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi"
"github.com/Azure/go-autorest/autorest"
)
// AzureClientSetMock provides a generic mock for AzureClientSet
type AzureClientSetMock struct {
DisksClientMock computeapi.DisksClientAPI
ImagesClientMock computeapi.ImagesClientAPI
VirtualMachineImagesClientMock VirtualMachineImagesClientAPI
VirtualMachinesClientMock computeapi.VirtualMachinesClientAPI
PollClientMock autorest.Client
MetadataClientMock MetadataClientAPI
}
// DisksClient returns a DisksClientAPI
func (m *AzureClientSetMock) DisksClient() computeapi.DisksClientAPI {
return m.DisksClientMock
}
// ImagesClient returns a ImagesClientAPI
func (m *AzureClientSetMock) ImagesClient() computeapi.ImagesClientAPI {
return m.ImagesClientMock
}
// VirtualMachineImagesClient returns a VirtualMachineImagesClientAPI
func (m *AzureClientSetMock) VirtualMachineImagesClient() VirtualMachineImagesClientAPI {
return m.VirtualMachineImagesClientMock
}
// VirtualMachinesClient returns a VirtualMachinesClientAPI
func (m *AzureClientSetMock) VirtualMachinesClient() computeapi.VirtualMachinesClientAPI {
return m.VirtualMachinesClientMock
}
// PollClient returns an autorest Client that can be used for polling async requests
func (m *AzureClientSetMock) PollClient() autorest.Client {
return m.PollClientMock
}
// MetadataClient returns a MetadataClientAPI
func (m *AzureClientSetMock) MetadataClient() MetadataClientAPI {
return m.MetadataClientMock
}
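
Because every field is itself an interface, a test can stub only the clients it touches and leave the rest as zero values. A compile-time check plus a trivial instantiation (illustrative only):

package main

import "github.com/hashicorp/packer/builder/azure/common/client"

// The pointer type satisfies the full AzureClientSet interface.
var _ client.AzureClientSet = &client.AzureClientSetMock{}

func main() {
	cs := &client.AzureClientSetMock{} // assign fake clients to fields as needed
	_ = cs.PollClient()                // zero-value autorest.Client
}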

View File

@ -4,7 +4,6 @@ package client
import (
"fmt"
"github.com/hashicorp/packer/builder/azure/common"
"os"
"strings"
"time"
@ -198,60 +197,64 @@ func (c Config) UseMSI() bool {
c.TenantID == ""
}
func (c Config) GetServicePrincipalTokens(
say func(string)) (
func (c Config) GetServicePrincipalTokens(say func(string)) (
servicePrincipalToken *adal.ServicePrincipalToken,
servicePrincipalTokenVault *adal.ServicePrincipalToken,
err error) {
tenantID := c.TenantID
servicePrincipalToken, err = c.GetServicePrincipalToken(say,
c.CloudEnvironment.ResourceManagerEndpoint)
if err != nil {
return nil, nil, err
}
servicePrincipalTokenVault, err = c.GetServicePrincipalToken(say,
strings.TrimRight(c.CloudEnvironment.KeyVaultEndpoint, "/"))
if err != nil {
return nil, nil, err
}
return servicePrincipalToken, servicePrincipalTokenVault, nil
}
func (c Config) GetServicePrincipalToken(
say func(string), forResource string) (
servicePrincipalToken *adal.ServicePrincipalToken,
err error) {
var auth oAuthTokenProvider
switch c.authType {
case authTypeDeviceLogin:
say("Getting tokens using device flow")
auth = NewDeviceFlowOAuthTokenProvider(*c.CloudEnvironment, say, tenantID)
auth = NewDeviceFlowOAuthTokenProvider(*c.CloudEnvironment, say, c.TenantID)
case authTypeMSI:
say("Getting tokens using Managed Identity for Azure")
auth = NewMSIOAuthTokenProvider(*c.CloudEnvironment)
case authTypeClientSecret:
say("Getting tokens using client secret")
auth = NewSecretOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientSecret, tenantID)
auth = NewSecretOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientSecret, c.TenantID)
case authTypeClientCert:
say("Getting tokens using client certificate")
auth, err = NewCertOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientCertPath, tenantID)
auth, err = NewCertOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientCertPath, c.TenantID)
if err != nil {
return nil, nil, err
return nil, err
}
case authTypeClientBearerJWT:
say("Getting tokens using client bearer JWT")
auth = NewJWTOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientJWT, tenantID)
auth = NewJWTOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientJWT, c.TenantID)
default:
panic("authType not set, call FillParameters, or set explicitly")
}
servicePrincipalToken, err = auth.getServicePrincipalToken()
servicePrincipalToken, err = auth.getServicePrincipalTokenWithResource(forResource)
if err != nil {
return nil, nil, err
return nil, err
}
err = servicePrincipalToken.EnsureFresh()
if err != nil {
return nil, nil, err
return nil, err
}
servicePrincipalTokenVault, err = auth.getServicePrincipalTokenWithResource(
strings.TrimRight(c.CloudEnvironment.KeyVaultEndpoint, "/"))
if err != nil {
return nil, nil, err
}
err = servicePrincipalTokenVault.EnsureFresh()
if err != nil {
return nil, nil, err
}
return servicePrincipalToken, servicePrincipalTokenVault, nil
return servicePrincipalToken, nil
}
// FillParameters captures the user intent from the supplied parameter set in authType, and retrieves the TenantID and CloudEnvironment if they were not specified.
@ -299,4 +302,4 @@ func (c *Config) FillParameters() error {
}
// allow override for unit tests
var findTenantID = common.FindTenantID
var findTenantID = FindTenantID

View File

@ -0,0 +1,8 @@
// +build !linux
package client
// IsAzure returns true if Packer is running on Azure (currently only works on Linux)
func IsAzure() bool {
return false
}

View File

@ -0,0 +1,23 @@
package client
import (
"bytes"
"io/ioutil"
)
var (
smbiosAssetTagFile = "/sys/class/dmi/id/chassis_asset_tag"
azureAssetTag = []byte("7783-7084-3265-9085-8269-3286-77\n")
)
// IsAzure returns true if Packer is running on Azure
func IsAzure() bool {
return isAzureAssetTag(smbiosAssetTagFile)
}
func isAzureAssetTag(filename string) bool {
if d, err := ioutil.ReadFile(filename); err == nil {
return bytes.Equal(d, azureAssetTag)
}
return false
}

View File

@ -0,0 +1,29 @@
package client
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestIsAzure(t *testing.T) {
f, err := ioutil.TempFile("", "TestIsAzure*")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
f.Seek(0, 0)
f.Truncate(0)
f.Write([]byte("not the azure assettag"))
assert.False(t, isAzureAssetTag(f.Name()), "asset tag is not Azure's")
f.Seek(0, 0)
f.Truncate(0)
f.Write(azureAssetTag)
assert.True(t, isAzureAssetTag(f.Name()), "asset tag is Azure's")
}

View File

@ -1,4 +1,4 @@
package common
package client
import (
"context"

View File

@ -0,0 +1,81 @@
package client
import (
"fmt"
"net/http"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
)
// DefaultMetadataClient is the default instance metadata client for Azure. Replace this variable for testing purposes only.
var DefaultMetadataClient = NewMetadataClient()
// MetadataClientAPI holds the methods that Packer uses to get information about the VM it is currently running on
type MetadataClientAPI interface {
GetComputeInfo() (*ComputeInfo, error)
}
type ComputeInfo struct {
Name string
ResourceGroupName string
SubscriptionID string
Location string
}
// metadataClient implements MetadataClientAPI
type metadataClient struct {
autorest.Sender
}
var _ MetadataClientAPI = metadataClient{}
const imdsURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
// GetComputeInfo retrieves metadata about the current VM from the Azure Instance Metadata Service
func (client metadataClient) GetComputeInfo() (*ComputeInfo, error) {
req, err := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithHeader("Metadata", "true"),
autorest.WithBaseURL(imdsURL),
).Prepare((&http.Request{}))
if err != nil {
return nil, err
}
res, err := autorest.SendWithSender(client, req,
autorest.DoRetryForDuration(1*time.Minute, 5*time.Second))
if err != nil {
return nil, err
}
var vminfo struct {
ComputeInfo `json:"compute"`
}
err = autorest.Respond(
res,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&vminfo),
autorest.ByClosing())
if err != nil {
return nil, err
}
return &vminfo.ComputeInfo, nil
}
func (ci ComputeInfo) ResourceID() string {
return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s",
ci.SubscriptionID,
ci.ResourceGroupName,
ci.Name,
)
}
// NewMetadataClient creates a new instance metadata client
func NewMetadataClient() MetadataClientAPI {
return metadataClient{
Sender: autorest.CreateSender(),
}
}
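
DefaultMetadataClient talks to the instance metadata endpoint at 169.254.169.254, so the call below only succeeds from inside an Azure VM. A short sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/packer/builder/azure/common/client"
)

func main() {
	info, err := client.DefaultMetadataClient.GetComputeInfo()
	if err != nil {
		fmt.Println("IMDS unavailable (probably not running on Azure):", err)
		return
	}
	// ResourceID assembles the full ARM resource ID of the current VM.
	fmt.Println(info.ResourceID())
}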

View File

@ -0,0 +1,32 @@
package client
import (
"fmt"
"testing"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/stretchr/testify/assert"
)
func Test_MetadataReturnsComputeInfo(t *testing.T) {
if !IsAzure() {
t.Skipf("Not running on Azure, skipping live IMDS test")
}
mdc := NewMetadataClient()
info, err := mdc.GetComputeInfo()
assert.Nil(t, err)
vm, err := azure.ParseResourceID(fmt.Sprintf(
"/subscriptions/%s"+
"/resourceGroups/%s"+
"/providers/Microsoft.Compute"+
"/virtualMachines/%s",
info.SubscriptionID,
info.ResourceGroupName,
info.Name))
assert.Nil(t, err, "%q is not parsable as an Azure resource info", info)
assert.Regexp(t, "^[0-9a-fA-F-]{36}$", vm.SubscriptionID)
t.Logf("VM: %+v", vm)
}

View File

@ -0,0 +1,57 @@
package client
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi"
"github.com/Azure/go-autorest/autorest/to"
"regexp"
"strings"
)
var platformImageRegex = regexp.MustCompile(`^[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+$`)
type VirtualMachineImagesClientAPI interface {
computeapi.VirtualMachineImagesClientAPI
// extensions
GetLatest(ctx context.Context, publisher, offer, sku, location string) (*compute.VirtualMachineImageResource, error)
}
var _ VirtualMachineImagesClientAPI = virtualMachineImagesClientAPI{}
type virtualMachineImagesClientAPI struct {
computeapi.VirtualMachineImagesClientAPI
}
func ParsePlatformImageURN(urn string) (image *PlatformImage, err error) {
if !platformImageRegex.Match([]byte(urn)) {
return nil, fmt.Errorf("%q is not a valid platform image specifier", urn)
}
parts := strings.Split(urn, ":")
return &PlatformImage{parts[0], parts[1], parts[2], parts[3]}, nil
}
func (c virtualMachineImagesClientAPI) GetLatest(ctx context.Context, publisher, offer, sku, location string) (*compute.VirtualMachineImageResource, error) {
result, err := c.List(ctx, location, publisher, offer, sku, "", to.Int32Ptr(1), "name desc")
if err != nil {
return nil, err
}
if result.Value == nil || len(*result.Value) == 0 {
return nil, fmt.Errorf("%s:%s:%s:latest could not be found in location %s", publisher, offer, sku, location)
}
return &(*result.Value)[0], nil
}
type PlatformImage struct {
Publisher, Offer, Sku, Version string
}
func (pi PlatformImage) URN() string {
return fmt.Sprintf("%s:%s:%s:%s",
pi.Publisher,
pi.Offer,
pi.Sku,
pi.Version)
}
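
ParsePlatformImageURN and URN round-trip the publisher:offer:sku:version form that the builder accepts as a source; the URN below matches the one in the example template later in this change:

package main

import (
	"fmt"

	"github.com/hashicorp/packer/builder/azure/common/client"
)

func main() {
	img, err := client.ParsePlatformImageURN("credativ:Debian:9:latest")
	if err != nil {
		fmt.Println("invalid URN:", err)
		return
	}
	fmt.Println(img.Publisher, img.Offer, img.Sku, img.Version)
	fmt.Println(img.URN()) // round-trips to "credativ:Debian:9:latest"
}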

View File

@ -0,0 +1,30 @@
package client
import (
"fmt"
"testing"
)
func Test_platformImageRegex(t *testing.T) {
for i, v := range []string{
"Publisher:Offer:Sku:Versions",
"Publisher:Offer-name:2.0_alpha:2.0.2019060122",
} {
t.Run(fmt.Sprintf("should_match_%d", i), func(t *testing.T) {
if !platformImageRegex.Match([]byte(v)) {
t.Fatalf("expected %q to match", v)
}
})
}
for i, v := range []string{
"Publ isher:Offer:Sku:Versions",
"Publ/isher:Offer-name:2.0_alpha:2.0.2019060122",
} {
t.Run(fmt.Sprintf("should_not_match_%d", i), func(t *testing.T) {
if platformImageRegex.Match([]byte(v)) {
t.Fatalf("did not expected %q to match", v)
}
})
}
}

View File

@ -0,0 +1,30 @@
package client
import (
"errors"
"net/http"
"os"
"testing"
"github.com/Azure/go-autorest/autorest/azure/auth"
)
func GetTestClientSet(t *testing.T) (AzureClientSet, error) {
if os.Getenv("AZURE_INTEGRATION_TEST") == "" {
t.Skip("AZURE_INTEGRATION_TEST not set")
} else {
a, err := auth.NewAuthorizerFromEnvironment()
if err == nil {
cli := azureClientSet{}
cli.authorizer = a
cli.subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID")
cli.PollingDelay = 0
cli.sender = http.DefaultClient
return cli, nil
} else {
t.Skipf("Could not create Azure client: %v", err)
}
}
return nil, errors.New("Couldn't create client set")
}

View File

@ -6,7 +6,6 @@ import (
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
packerAzureCommon "github.com/hashicorp/packer/builder/azure/common"
)
func NewDeviceFlowOAuthTokenProvider(env azure.Environment, say func(string), tenantID string) oAuthTokenProvider {
@ -36,5 +35,5 @@ func (tp *deviceflowOauthTokenProvider) getServicePrincipalTokenWithResource(res
tp.say(fmt.Sprintf("Getting token for %s", resource))
}
return packerAzureCommon.Authenticate(tp.env, tp.tenantID, tp.say, resource)
return Authenticate(tp.env, tp.tenantID, tp.say, resource)
}

View File

@ -20,6 +20,7 @@ import (
amazonebsvolumebuilder "github.com/hashicorp/packer/builder/amazon/ebsvolume"
amazoninstancebuilder "github.com/hashicorp/packer/builder/amazon/instance"
azurearmbuilder "github.com/hashicorp/packer/builder/azure/arm"
azurechrootbuilder "github.com/hashicorp/packer/builder/azure/chroot"
cloudstackbuilder "github.com/hashicorp/packer/builder/cloudstack"
digitaloceanbuilder "github.com/hashicorp/packer/builder/digitalocean"
dockerbuilder "github.com/hashicorp/packer/builder/docker"
@ -109,6 +110,7 @@ var Builders = map[string]packer.Builder{
"amazon-ebsvolume": new(amazonebsvolumebuilder.Builder),
"amazon-instance": new(amazoninstancebuilder.Builder),
"azure-arm": new(azurearmbuilder.Builder),
"azure-chroot": new(azurechrootbuilder.Builder),
"cloudstack": new(cloudstackbuilder.Builder),
"digitalocean": new(digitaloceanbuilder.Builder),
"docker": new(dockerbuilder.Builder),

View File

@ -0,0 +1,27 @@
{
"variables": {
"client_id": "{{env `ARM_CLIENT_ID`}}",
"client_secret": "{{env `ARM_CLIENT_SECRET`}}",
"subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}",
"resource_group": "{{env `ARM_IMAGE_RESOURCEGROUP_ID`}}"
},
"builders": [{
"type": "azure-chroot",
"client_id": "{{user `client_id`}}",
"client_secret": "{{user `client_secret`}}",
"subscription_id": "{{user `subscription_id`}}",
"image_resource_id": "/subscriptions/{{user `subscription_id`}}/resourceGroups/{{user `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}",
"source": "credativ:Debian:9:latest"
}],
"provisioners": [{
"inline": [
"apt-get update",
"apt-get upgrade -y"
],
"inline_shebang": "/bin/sh -x",
"type": "shell"
}]
}

go.mod
View File

@ -34,6 +34,7 @@ require (
github.com/digitalocean/go-libvirt v0.0.0-20190626172931-4d226dd6c437 // indirect
github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8
github.com/digitalocean/godo v1.11.1
github.com/dimchansky/utfbom v1.1.0 // indirect
github.com/dnaeon/go-vcr v1.0.0 // indirect
github.com/docker/docker v0.0.0-20180422163414-57142e89befe // indirect
github.com/dustin/go-humanize v1.0.0 // indirect

go.sum
View File

@ -93,6 +93,8 @@ github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8 h1:N7nH2py78L
github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8/go.mod h1:/YnlngP1PARC0SKAZx6kaAEMOp8bNTQGqS+Ka3MctNI=
github.com/digitalocean/godo v1.11.1 h1:OsTh37YFKk+g6DnAOrkXJ9oDArTkRx5UTkBJ2EWAO38=
github.com/digitalocean/godo v1.11.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dnaeon/go-vcr v1.0.0 h1:1QZ+ahihvRvppcJnFvuoHAdnZTf1PqKjO4Ftr1cfQTo=
github.com/dnaeon/go-vcr v1.0.0/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/docker v0.0.0-20180422163414-57142e89befe h1:VW8TnWi0CZgg7oCv0wH6evNwkzcJg/emnw4HrVIWws4=

View File

@ -0,0 +1,46 @@
// +build go1.9
// Copyright 2019 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
package computeapi
import original "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi"
type AvailabilitySetsClientAPI = original.AvailabilitySetsClientAPI
type ContainerServicesClientAPI = original.ContainerServicesClientAPI
type DisksClientAPI = original.DisksClientAPI
type GalleriesClientAPI = original.GalleriesClientAPI
type GalleryImageVersionsClientAPI = original.GalleryImageVersionsClientAPI
type GalleryImagesClientAPI = original.GalleryImagesClientAPI
type ImagesClientAPI = original.ImagesClientAPI
type LogAnalyticsClientAPI = original.LogAnalyticsClientAPI
type OperationsClientAPI = original.OperationsClientAPI
type ProximityPlacementGroupsClientAPI = original.ProximityPlacementGroupsClientAPI
type ResourceSkusClientAPI = original.ResourceSkusClientAPI
type SnapshotsClientAPI = original.SnapshotsClientAPI
type UsageClientAPI = original.UsageClientAPI
type VirtualMachineExtensionImagesClientAPI = original.VirtualMachineExtensionImagesClientAPI
type VirtualMachineExtensionsClientAPI = original.VirtualMachineExtensionsClientAPI
type VirtualMachineImagesClientAPI = original.VirtualMachineImagesClientAPI
type VirtualMachineRunCommandsClientAPI = original.VirtualMachineRunCommandsClientAPI
type VirtualMachineScaleSetExtensionsClientAPI = original.VirtualMachineScaleSetExtensionsClientAPI
type VirtualMachineScaleSetRollingUpgradesClientAPI = original.VirtualMachineScaleSetRollingUpgradesClientAPI
type VirtualMachineScaleSetVMsClientAPI = original.VirtualMachineScaleSetVMsClientAPI
type VirtualMachineScaleSetsClientAPI = original.VirtualMachineScaleSetsClientAPI
type VirtualMachineSizesClientAPI = original.VirtualMachineSizesClientAPI
type VirtualMachinesClientAPI = original.VirtualMachinesClientAPI

File diff suppressed because it is too large

View File

@ -0,0 +1,299 @@
package computeapi
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest"
)
// OperationsClientAPI contains the set of methods on the OperationsClient type.
type OperationsClientAPI interface {
List(ctx context.Context) (result compute.OperationListResult, err error)
}
var _ OperationsClientAPI = (*compute.OperationsClient)(nil)
// AvailabilitySetsClientAPI contains the set of methods on the AvailabilitySetsClient type.
type AvailabilitySetsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters compute.AvailabilitySet) (result compute.AvailabilitySet, err error)
Delete(ctx context.Context, resourceGroupName string, availabilitySetName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, availabilitySetName string) (result compute.AvailabilitySet, err error)
List(ctx context.Context, resourceGroupName string) (result compute.AvailabilitySetListResultPage, err error)
ListAvailableSizes(ctx context.Context, resourceGroupName string, availabilitySetName string) (result compute.VirtualMachineSizeListResult, err error)
ListBySubscription(ctx context.Context) (result compute.AvailabilitySetListResultPage, err error)
Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters compute.AvailabilitySetUpdate) (result compute.AvailabilitySet, err error)
}
var _ AvailabilitySetsClientAPI = (*compute.AvailabilitySetsClient)(nil)
// ProximityPlacementGroupsClientAPI contains the set of methods on the ProximityPlacementGroupsClient type.
type ProximityPlacementGroupsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters compute.ProximityPlacementGroup) (result compute.ProximityPlacementGroup, err error)
Delete(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result compute.ProximityPlacementGroup, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ProximityPlacementGroupListResultPage, err error)
ListBySubscription(ctx context.Context) (result compute.ProximityPlacementGroupListResultPage, err error)
Update(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters compute.ProximityPlacementGroupUpdate) (result compute.ProximityPlacementGroup, err error)
}
var _ ProximityPlacementGroupsClientAPI = (*compute.ProximityPlacementGroupsClient)(nil)
// VirtualMachineExtensionImagesClientAPI contains the set of methods on the VirtualMachineExtensionImagesClient type.
type VirtualMachineExtensionImagesClientAPI interface {
Get(ctx context.Context, location string, publisherName string, typeParameter string, version string) (result compute.VirtualMachineExtensionImage, err error)
ListTypes(ctx context.Context, location string, publisherName string) (result compute.ListVirtualMachineExtensionImage, err error)
ListVersions(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result compute.ListVirtualMachineExtensionImage, err error)
}
var _ VirtualMachineExtensionImagesClientAPI = (*compute.VirtualMachineExtensionImagesClient)(nil)
// VirtualMachineExtensionsClientAPI contains the set of methods on the VirtualMachineExtensionsClient type.
type VirtualMachineExtensionsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters compute.VirtualMachineExtension) (result compute.VirtualMachineExtensionsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (result compute.VirtualMachineExtensionsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (result compute.VirtualMachineExtension, err error)
List(ctx context.Context, resourceGroupName string, VMName string, expand string) (result compute.VirtualMachineExtensionsListResult, err error)
Update(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters compute.VirtualMachineExtensionUpdate) (result compute.VirtualMachineExtensionsUpdateFuture, err error)
}
var _ VirtualMachineExtensionsClientAPI = (*compute.VirtualMachineExtensionsClient)(nil)
// VirtualMachineImagesClientAPI contains the set of methods on the VirtualMachineImagesClient type.
type VirtualMachineImagesClientAPI interface {
Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (result compute.VirtualMachineImage, err error)
List(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result compute.ListVirtualMachineImageResource, err error)
ListOffers(ctx context.Context, location string, publisherName string) (result compute.ListVirtualMachineImageResource, err error)
ListPublishers(ctx context.Context, location string) (result compute.ListVirtualMachineImageResource, err error)
ListSkus(ctx context.Context, location string, publisherName string, offer string) (result compute.ListVirtualMachineImageResource, err error)
}
var _ VirtualMachineImagesClientAPI = (*compute.VirtualMachineImagesClient)(nil)
// UsageClientAPI contains the set of methods on the UsageClient type.
type UsageClientAPI interface {
List(ctx context.Context, location string) (result compute.ListUsagesResultPage, err error)
}
var _ UsageClientAPI = (*compute.UsageClient)(nil)
// VirtualMachinesClientAPI contains the set of methods on the VirtualMachinesClient type.
type VirtualMachinesClientAPI interface {
Capture(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineCaptureParameters) (result compute.VirtualMachinesCaptureFuture, err error)
ConvertToManagedDisks(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesConvertToManagedDisksFuture, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (result compute.VirtualMachinesCreateOrUpdateFuture, err error)
Deallocate(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesDeallocateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesDeleteFuture, err error)
Generalize(ctx context.Context, resourceGroupName string, VMName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error)
InstanceView(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachineInstanceView, err error)
List(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultPage, err error)
ListAll(ctx context.Context) (result compute.VirtualMachineListResultPage, err error)
ListAvailableSizes(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachineSizeListResult, err error)
ListByLocation(ctx context.Context, location string) (result compute.VirtualMachineListResultPage, err error)
PerformMaintenance(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesPerformMaintenanceFuture, err error)
PowerOff(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (result compute.VirtualMachinesPowerOffFuture, err error)
Redeploy(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesRedeployFuture, err error)
Reimage(ctx context.Context, resourceGroupName string, VMName string, parameters *compute.VirtualMachineReimageParameters) (result compute.VirtualMachinesReimageFuture, err error)
Restart(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesRestartFuture, err error)
RunCommand(ctx context.Context, resourceGroupName string, VMName string, parameters compute.RunCommandInput) (result compute.VirtualMachinesRunCommandFuture, err error)
Start(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesStartFuture, err error)
Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate) (result compute.VirtualMachinesUpdateFuture, err error)
}
var _ VirtualMachinesClientAPI = (*compute.VirtualMachinesClient)(nil)
// VirtualMachineSizesClientAPI contains the set of methods on the VirtualMachineSizesClient type.
type VirtualMachineSizesClientAPI interface {
List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error)
}
var _ VirtualMachineSizesClientAPI = (*compute.VirtualMachineSizesClient)(nil)
// ImagesClientAPI contains the set of methods on the ImagesClient type.
type ImagesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters compute.Image) (result compute.ImagesCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, imageName string) (result compute.ImagesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, imageName string, expand string) (result compute.Image, err error)
List(ctx context.Context) (result compute.ImageListResultPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ImageListResultPage, err error)
Update(ctx context.Context, resourceGroupName string, imageName string, parameters compute.ImageUpdate) (result compute.ImagesUpdateFuture, err error)
}
var _ ImagesClientAPI = (*compute.ImagesClient)(nil)
// VirtualMachineScaleSetsClientAPI contains the set of methods on the VirtualMachineScaleSetsClient type.
type VirtualMachineScaleSetsClientAPI interface {
ConvertToSinglePlacementGroup(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VMScaleSetConvertToSinglePlacementGroupInput) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (result compute.VirtualMachineScaleSetsCreateOrUpdateFuture, err error)
Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsDeallocateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetsDeleteFuture, err error)
DeleteInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (result compute.VirtualMachineScaleSetsDeleteInstancesFuture, err error)
ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result compute.RecoveryWalkResponse, err error)
Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error)
GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetInstanceView, err error)
GetOSUpgradeHistory(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetListOSUpgradeHistoryPage, err error)
List(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineScaleSetListResultPage, err error)
ListAll(ctx context.Context) (result compute.VirtualMachineScaleSetListWithLinkResultPage, err error)
ListSkus(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetListSkusResultPage, err error)
PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsPerformMaintenanceFuture, err error)
PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (result compute.VirtualMachineScaleSetsPowerOffFuture, err error)
Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsRedeployFuture, err error)
Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *compute.VirtualMachineScaleSetReimageParameters) (result compute.VirtualMachineScaleSetsReimageFuture, err error)
ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsReimageAllFuture, err error)
Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsRestartFuture, err error)
Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsStartFuture, err error)
Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSetUpdate) (result compute.VirtualMachineScaleSetsUpdateFuture, err error)
UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (result compute.VirtualMachineScaleSetsUpdateInstancesFuture, err error)
}
var _ VirtualMachineScaleSetsClientAPI = (*compute.VirtualMachineScaleSetsClient)(nil)
// VirtualMachineScaleSetExtensionsClientAPI contains the set of methods on the VirtualMachineScaleSetExtensionsClient type.
type VirtualMachineScaleSetExtensionsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters compute.VirtualMachineScaleSetExtension) (result compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (result compute.VirtualMachineScaleSetExtensionsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result compute.VirtualMachineScaleSetExtension, err error)
List(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetExtensionListResultPage, err error)
}
var _ VirtualMachineScaleSetExtensionsClientAPI = (*compute.VirtualMachineScaleSetExtensionsClient)(nil)
// VirtualMachineScaleSetRollingUpgradesClientAPI contains the set of methods on the VirtualMachineScaleSetRollingUpgradesClient type.
type VirtualMachineScaleSetRollingUpgradesClientAPI interface {
Cancel(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesCancelFuture, err error)
GetLatest(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.RollingUpgradeStatusInfo, err error)
StartExtensionUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error)
StartOSUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error)
}
var _ VirtualMachineScaleSetRollingUpgradesClientAPI = (*compute.VirtualMachineScaleSetRollingUpgradesClient)(nil)
// VirtualMachineScaleSetVMsClientAPI contains the set of methods on the VirtualMachineScaleSetVMsClient type.
type VirtualMachineScaleSetVMsClientAPI interface {
Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeallocateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error)
GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error)
List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResultPage, err error)
PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error)
PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (result compute.VirtualMachineScaleSetVMsPowerOffFuture, err error)
Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsRedeployFuture, err error)
Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *compute.VirtualMachineScaleSetVMReimageParameters) (result compute.VirtualMachineScaleSetVMsReimageFuture, err error)
ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsReimageAllFuture, err error)
Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsRestartFuture, err error)
RunCommand(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.RunCommandInput) (result compute.VirtualMachineScaleSetVMsRunCommandFuture, err error)
Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsStartFuture, err error)
Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (result compute.VirtualMachineScaleSetVMsUpdateFuture, err error)
}
var _ VirtualMachineScaleSetVMsClientAPI = (*compute.VirtualMachineScaleSetVMsClient)(nil)
// LogAnalyticsClientAPI contains the set of methods on the LogAnalyticsClient type.
type LogAnalyticsClientAPI interface {
ExportRequestRateByInterval(ctx context.Context, parameters compute.RequestRateByIntervalInput, location string) (result compute.LogAnalyticsExportRequestRateByIntervalFuture, err error)
ExportThrottledRequests(ctx context.Context, parameters compute.ThrottledRequestsInput, location string) (result compute.LogAnalyticsExportThrottledRequestsFuture, err error)
}
var _ LogAnalyticsClientAPI = (*compute.LogAnalyticsClient)(nil)
// VirtualMachineRunCommandsClientAPI contains the set of methods on the VirtualMachineRunCommandsClient type.
type VirtualMachineRunCommandsClientAPI interface {
Get(ctx context.Context, location string, commandID string) (result compute.RunCommandDocument, err error)
List(ctx context.Context, location string) (result compute.RunCommandListResultPage, err error)
}
var _ VirtualMachineRunCommandsClientAPI = (*compute.VirtualMachineRunCommandsClient)(nil)
// ResourceSkusClientAPI contains the set of methods on the ResourceSkusClient type.
type ResourceSkusClientAPI interface {
List(ctx context.Context) (result compute.ResourceSkusResultPage, err error)
}
var _ ResourceSkusClientAPI = (*compute.ResourceSkusClient)(nil)
// DisksClientAPI contains the set of methods on the DisksClient type.
type DisksClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk compute.Disk) (result compute.DisksCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error)
GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData compute.GrantAccessData) (result compute.DisksGrantAccessFuture, err error)
List(ctx context.Context) (result compute.DiskListPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error)
RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksRevokeAccessFuture, err error)
Update(ctx context.Context, resourceGroupName string, diskName string, disk compute.DiskUpdate) (result compute.DisksUpdateFuture, err error)
}
var _ DisksClientAPI = (*compute.DisksClient)(nil)
// SnapshotsClientAPI contains the set of methods on the SnapshotsClient type.
type SnapshotsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) (result compute.SnapshotsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.SnapshotsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.Snapshot, err error)
GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData compute.GrantAccessData) (result compute.SnapshotsGrantAccessFuture, err error)
List(ctx context.Context) (result compute.SnapshotListPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.SnapshotListPage, err error)
RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.SnapshotsRevokeAccessFuture, err error)
Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.SnapshotUpdate) (result compute.SnapshotsUpdateFuture, err error)
}
var _ SnapshotsClientAPI = (*compute.SnapshotsClient)(nil)
// GalleriesClientAPI contains the set of methods on the GalleriesClient type.
type GalleriesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery compute.Gallery) (result compute.GalleriesCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, galleryName string) (result compute.GalleriesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, galleryName string) (result compute.Gallery, err error)
List(ctx context.Context) (result compute.GalleryListPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.GalleryListPage, err error)
}
var _ GalleriesClientAPI = (*compute.GalleriesClient)(nil)
// GalleryImagesClientAPI contains the set of methods on the GalleryImagesClient type.
type GalleryImagesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage compute.GalleryImage) (result compute.GalleryImagesCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImagesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImage, err error)
ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result compute.GalleryImageListPage, err error)
}
var _ GalleryImagesClientAPI = (*compute.GalleryImagesClient)(nil)
// GalleryImageVersionsClientAPI contains the set of methods on the GalleryImageVersionsClient type.
type GalleryImageVersionsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion compute.GalleryImageVersion) (result compute.GalleryImageVersionsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result compute.GalleryImageVersionsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand compute.ReplicationStatusTypes) (result compute.GalleryImageVersion, err error)
ListByGalleryImage(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImageVersionListPage, err error)
}
var _ GalleryImageVersionsClientAPI = (*compute.GalleryImageVersionsClient)(nil)
// ContainerServicesClientAPI contains the set of methods on the ContainerServicesClient type.
type ContainerServicesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters compute.ContainerService) (result compute.ContainerServicesCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result compute.ContainerServicesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result compute.ContainerService, err error)
List(ctx context.Context) (result compute.ContainerServiceListResultPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ContainerServiceListResultPage, err error)
}
var _ ContainerServicesClientAPI = (*compute.ContainerServicesClient)(nil)

View File

@ -0,0 +1,712 @@
package auth
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"unicode/utf16"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/dimchansky/utfbom"
"golang.org/x/crypto/pkcs12"
)
// The possible keys in the Values map.
const (
SubscriptionID = "AZURE_SUBSCRIPTION_ID"
TenantID = "AZURE_TENANT_ID"
ClientID = "AZURE_CLIENT_ID"
ClientSecret = "AZURE_CLIENT_SECRET"
CertificatePath = "AZURE_CERTIFICATE_PATH"
CertificatePassword = "AZURE_CERTIFICATE_PASSWORD"
Username = "AZURE_USERNAME"
Password = "AZURE_PASSWORD"
EnvironmentName = "AZURE_ENVIRONMENT"
Resource = "AZURE_AD_RESOURCE"
ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
ResourceManagerEndpoint = "ResourceManagerEndpoint"
GraphResourceID = "GraphResourceID"
SQLManagementEndpoint = "SQLManagementEndpoint"
GalleryEndpoint = "GalleryEndpoint"
ManagementEndpoint = "ManagementEndpoint"
)
// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
settings, err := GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
return settings.GetAuthorizer()
}
// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {
settings, err := GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
settings.Values[Resource] = resource
return settings.GetAuthorizer()
}
// EnvironmentSettings contains the available authentication settings.
type EnvironmentSettings struct {
Values map[string]string
Environment azure.Environment
}
// GetSettingsFromEnvironment returns the available authentication settings from the environment.
func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
s = EnvironmentSettings{
Values: map[string]string{},
}
s.setValue(SubscriptionID)
s.setValue(TenantID)
s.setValue(ClientID)
s.setValue(ClientSecret)
s.setValue(CertificatePath)
s.setValue(CertificatePassword)
s.setValue(Username)
s.setValue(Password)
s.setValue(EnvironmentName)
s.setValue(Resource)
if v := s.Values[EnvironmentName]; v == "" {
s.Environment = azure.PublicCloud
} else {
s.Environment, err = azure.EnvironmentFromName(v)
}
if s.Values[Resource] == "" {
s.Values[Resource] = s.Environment.ResourceManagerEndpoint
}
return
}
// GetSubscriptionID returns the available subscription ID or an empty string.
func (settings EnvironmentSettings) GetSubscriptionID() string {
return settings.Values[SubscriptionID]
}
// adds the specified environment variable value to the Values map if it exists
func (settings EnvironmentSettings) setValue(key string) {
if v := os.Getenv(key); v != "" {
settings.Values[key] = v
}
}
// helper to return client and tenant IDs
func (settings EnvironmentSettings) getClientAndTenant() (string, string) {
clientID := settings.Values[ClientID]
tenantID := settings.Values[TenantID]
return clientID, tenantID
}
// GetClientCredentials creates a config object from the available client credentials.
// An error is returned if no client credentials are available.
func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) {
secret := settings.Values[ClientSecret]
if secret == "" {
return ClientCredentialsConfig{}, errors.New("missing client secret")
}
clientID, tenantID := settings.getClientAndTenant()
config := NewClientCredentialsConfig(clientID, secret, tenantID)
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
config.Resource = settings.Values[Resource]
return config, nil
}
// GetClientCertificate creates a config object from the available certificate credentials.
// An error is returned if no certificate credentials are available.
func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
certPath := settings.Values[CertificatePath]
if certPath == "" {
return ClientCertificateConfig{}, errors.New("missing certificate path")
}
certPwd := settings.Values[CertificatePassword]
clientID, tenantID := settings.getClientAndTenant()
config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
config.Resource = settings.Values[Resource]
return config, nil
}
// GetUsernamePassword creates a config object from the available username/password credentials.
// An error is returned if no username/password credentials are available.
func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
username := settings.Values[Username]
password := settings.Values[Password]
if username == "" || password == "" {
return UsernamePasswordConfig{}, errors.New("missing username/password")
}
clientID, tenantID := settings.getClientAndTenant()
config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
config.Resource = settings.Values[Resource]
return config, nil
}
// GetMSI creates a MSI config object from the available client ID.
func (settings EnvironmentSettings) GetMSI() MSIConfig {
config := NewMSIConfig()
config.Resource = settings.Values[Resource]
config.ClientID = settings.Values[ClientID]
return config
}
// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
clientID, tenantID := settings.getClientAndTenant()
config := NewDeviceFlowConfig(clientID, tenantID)
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
config.Resource = settings.Values[Resource]
return config
}
// GetAuthorizer creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
//1.Client Credentials
if c, e := settings.GetClientCredentials(); e == nil {
return c.Authorizer()
}
//2. Client Certificate
if c, e := settings.GetClientCertificate(); e == nil {
return c.Authorizer()
}
//3. Username Password
if c, e := settings.GetUsernamePassword(); e == nil {
return c.Authorizer()
}
// 4. MSI
return settings.GetMSI().Authorizer()
}
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
// 1. Client credentials
// 2. Client certificate
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
settings, err := GetSettingsFromFile()
if err != nil {
return nil, err
}
if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
return a, err
}
if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
return a, err
}
return nil, errors.New("auth file missing client and certificate credentials")
}
// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
// 1. Client credentials
// 2. Client certificate
func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
s, err := GetSettingsFromFile()
if err != nil {
return nil, err
}
if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {
return a, err
}
if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {
return a, err
}
return nil, errors.New("auth file missing client and certificate credentials")
}
// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
settings, err := GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
if settings.Values[Resource] == "" {
settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint
}
return NewAuthorizerFromCLIWithResource(settings.Values[Resource])
}
// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {
token, err := cli.GetTokenFromCLI(resource)
if err != nil {
return nil, err
}
adalToken, err := token.ToADALToken()
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(&adalToken), nil
}
// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file.
func GetSettingsFromFile() (FileSettings, error) {
s := FileSettings{}
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {
return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
if err != nil {
return s, err
}
// Auth file might be encoded
decoded, err := decode(contents)
if err != nil {
return s, err
}
authFile := map[string]interface{}{}
err = json.Unmarshal(decoded, &authFile)
if err != nil {
return s, err
}
s.Values = map[string]string{}
s.setKeyValue(ClientID, authFile["clientId"])
s.setKeyValue(ClientSecret, authFile["clientSecret"])
s.setKeyValue(CertificatePath, authFile["clientCertificate"])
s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"])
s.setKeyValue(SubscriptionID, authFile["subscriptionId"])
s.setKeyValue(TenantID, authFile["tenantId"])
s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"])
s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"])
s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"])
s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"])
s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"])
s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"])
return s, nil
}
// FileSettings contains the available authentication settings.
type FileSettings struct {
Values map[string]string
}
// GetSubscriptionID returns the available subscription ID or an empty string.
func (settings FileSettings) GetSubscriptionID() string {
return settings.Values[SubscriptionID]
}
// adds the specified value to the Values map if it isn't nil
func (settings FileSettings) setKeyValue(key string, val interface{}) {
if val != nil {
settings.Values[key] = val.(string)
}
}
// returns the specified AAD endpoint or the public cloud endpoint if unspecified
func (settings FileSettings) getAADEndpoint() string {
if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok {
return v
}
return azure.PublicCloud.ActiveDirectoryEndpoint
}
// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) {
resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
}
// ClientCredentialsAuthorizer creates an authorizer from the available client credentials.
func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {
resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
return settings.ClientCredentialsAuthorizerWithResource(resource)
}
// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken
// from the available client credentials and the specified resource.
func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) {
if _, ok := settings.Values[ClientSecret]; !ok {
return nil, errors.New("missing client secret")
}
config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID])
if err != nil {
return nil, err
}
return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource)
}
func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) {
if _, ok := settings.Values[CertificatePath]; !ok {
return ClientCertificateConfig{}, errors.New("missing certificate path")
}
cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID])
cfg.AADEndpoint = settings.getAADEndpoint()
cfg.Resource = resource
return cfg, nil
}
// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource.
func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(spToken), nil
}
// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) {
resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource)
}
// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials.
func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
return settings.ClientCertificateAuthorizerWithResource(resource)
}
// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
cfg, err := settings.clientCertificateConfigWithResource(resource)
if err != nil {
return nil, err
}
return cfg.ServicePrincipalToken()
}
// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
cfg, err := settings.clientCertificateConfigWithResource(resource)
if err != nil {
return nil, err
}
return cfg.Authorizer()
}
func decode(b []byte) ([]byte, error) {
reader, enc := utfbom.Skip(bytes.NewReader(b))
switch enc {
case utfbom.UTF16LittleEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.LittleEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
case utfbom.UTF16BigEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.BigEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
}
return ioutil.ReadAll(reader)
}
func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
// Compare the default base URI from the SDK to the endpoints from the public cloud.
// The base URI and the token resource are the same string. This func finds the
// authentication file field that matches the SDK base URI. The SDK defines the
// public cloud endpoint as its default base URI.
if !strings.HasSuffix(baseURI, "/") {
baseURI += "/"
}
switch baseURI {
case azure.PublicCloud.ServiceManagementEndpoint:
return settings.Values[ManagementEndpoint], nil
case azure.PublicCloud.ResourceManagerEndpoint:
return settings.Values[ResourceManagerEndpoint], nil
case azure.PublicCloud.ActiveDirectoryEndpoint:
return settings.Values[ActiveDirectoryEndpoint], nil
case azure.PublicCloud.GalleryEndpoint:
return settings.Values[GalleryEndpoint], nil
case azure.PublicCloud.GraphEndpoint:
return settings.Values[GraphResourceID], nil
}
return "", fmt.Errorf("auth: base URI not found in endpoints")
}
// NewClientCredentialsConfig creates a ClientCredentialsConfig object configured to obtain an Authorizer through client credentials.
// Defaults to Public Cloud and Resource Manager Endpoint.
func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig {
return ClientCredentialsConfig{
ClientID: clientID,
ClientSecret: clientSecret,
TenantID: tenantID,
Resource: azure.PublicCloud.ResourceManagerEndpoint,
AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
}
}
// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through a client certificate.
// Defaults to Public Cloud and Resource Manager Endpoint.
func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig {
return ClientCertificateConfig{
CertificatePath: certificatePath,
CertificatePassword: certificatePassword,
ClientID: clientID,
TenantID: tenantID,
Resource: azure.PublicCloud.ResourceManagerEndpoint,
AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
}
}
// NewUsernamePasswordConfig creates a UsernamePasswordConfig object configured to obtain an Authorizer through username and password.
// Defaults to Public Cloud and Resource Manager Endpoint.
func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig {
return UsernamePasswordConfig{
Username: username,
Password: password,
ClientID: clientID,
TenantID: tenantID,
Resource: azure.PublicCloud.ResourceManagerEndpoint,
AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
}
}
// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI.
func NewMSIConfig() MSIConfig {
return MSIConfig{
Resource: azure.PublicCloud.ResourceManagerEndpoint,
}
}
// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow.
// Defaults to Public Cloud and Resource Manager Endpoint.
func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig {
return DeviceFlowConfig{
ClientID: clientID,
TenantID: tenantID,
Resource: azure.PublicCloud.ResourceManagerEndpoint,
AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
}
}
// AuthorizerConfig provides an authorizer from the configuration provided.
type AuthorizerConfig interface {
Authorizer() (autorest.Authorizer, error)
}
// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials.
type ClientCredentialsConfig struct {
ClientID string
ClientSecret string
TenantID string
AADEndpoint string
Resource string
}
// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
if err != nil {
return nil, err
}
return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
}
// Authorizer gets the authorizer from client credentials.
func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
spToken, err := ccc.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err)
}
return autorest.NewBearerAuthorizer(spToken), nil
}
// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate.
type ClientCertificateConfig struct {
ClientID string
CertificatePath string
CertificatePassword string
TenantID string
AADEndpoint string
Resource string
}
// ServicePrincipalToken creates a ServicePrincipalToken from a client certificate.
func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
if err != nil {
return nil, err
}
certData, err := ioutil.ReadFile(ccc.CertificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
}
certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
}
// Authorizer gets an authorizer object from a client certificate.
func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
spToken, err := ccc.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err)
}
return autorest.NewBearerAuthorizer(spToken), nil
}
// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication.
type DeviceFlowConfig struct {
ClientID string
TenantID string
AADEndpoint string
Resource string
}
// Authorizer gets the authorizer from device flow.
func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {
spToken, err := dfc.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
}
return autorest.NewBearerAuthorizer(spToken), nil
}
// ServicePrincipalToken gets the service principal token from device flow.
func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)
if err != nil {
return nil, err
}
oauthClient := &autorest.Client{}
deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)
if err != nil {
return nil, fmt.Errorf("failed to start device auth flow: %s", err)
}
log.Println(*deviceCode.Message)
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
}
return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
}
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
if err != nil {
return nil, nil, err
}
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
if !isRsaKey {
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key")
}
return certificate, rsaPrivateKey, nil
}
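// Note: the file referenced by CertificatePath must be a PKCS#12 bundle whose
// private key is RSA. For illustration, such a bundle can be produced with
// OpenSSL (assumed invocation):
//
//	openssl pkcs12 -export -in cert.pem -inkey key.pem -out cert.pfx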
// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password.
type UsernamePasswordConfig struct {
ClientID string
Username string
Password string
TenantID string
AADEndpoint string
Resource string
}
// ServicePrincipalToken creates a ServicePrincipalToken from username and password.
func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
if err != nil {
return nil, err
}
return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
}
// Authorizer gets the authorizer from a username and a password.
func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
spToken, err := ups.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err)
}
return autorest.NewBearerAuthorizer(spToken), nil
}
// MSIConfig provides the options to get a bearer authorizer through MSI.
type MSIConfig struct {
Resource string
ClientID string
}
// Authorizer gets the authorizer from MSI.
func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, err
}
var spToken *adal.ServicePrincipalToken
if mc.ClientID == "" {
spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
}
} else {
spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID)
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err)
}
}
return autorest.NewBearerAuthorizer(spToken), nil
}
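// Illustrative sketch: on an Azure VM with a managed identity, no secrets are
// needed at all; ClientID is only set for user-assigned identities, and the
// GUID below is a placeholder.
//
//	config := NewMSIConfig()
//	config.ClientID = "00000000-0000-0000-0000-000000000000" // user-assigned identity only
//	authorizer, err := config.Authorizer()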

View File

@ -0,0 +1,79 @@
package cli
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/dimchansky/utfbom"
"github.com/mitchellh/go-homedir"
)
// Profile represents a Profile from the Azure CLI
type Profile struct {
InstallationID string `json:"installationId"`
Subscriptions []Subscription `json:"subscriptions"`
}
// Subscription represents a Subscription from the Azure CLI
type Subscription struct {
EnvironmentName string `json:"environmentName"`
ID string `json:"id"`
IsDefault bool `json:"isDefault"`
Name string `json:"name"`
State string `json:"state"`
TenantID string `json:"tenantId"`
User *User `json:"user"`
}
// User represents a User from the Azure CLI
type User struct {
Name string `json:"name"`
Type string `json:"type"`
}
const azureProfileJSON = "azureProfile.json"
// ProfilePath returns the path where the Azure CLI stores the Azure Profile.
func ProfilePath() (string, error) {
if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
return filepath.Join(cfgDir, azureProfileJSON), nil
}
return homedir.Expand("~/.azure/" + azureProfileJSON)
}
// LoadProfile restores a Profile object from a file located at 'path'.
func LoadProfile(path string) (result Profile, err error) {
var contents []byte
contents, err = ioutil.ReadFile(path)
if err != nil {
err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
return
}
reader := utfbom.SkipOnly(bytes.NewReader(contents))
dec := json.NewDecoder(reader)
if err = dec.Decode(&result); err != nil {
err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err)
return
}
return
}
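// Illustrative sketch: loading the CLI profile and picking the default
// subscription (error handling elided):
//
//	path, _ := ProfilePath()
//	profile, _ := LoadProfile(path)
//	for _, sub := range profile.Subscriptions {
//		if sub.IsDefault {
//			fmt.Println(sub.ID, sub.TenantID)
//		}
//	}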

View File

@ -0,0 +1,170 @@
package cli
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"strconv"
"time"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/date"
"github.com/mitchellh/go-homedir"
)
// Token represents an AccessToken from the Azure CLI
type Token struct {
AccessToken string `json:"accessToken"`
Authority string `json:"_authority"`
ClientID string `json:"_clientId"`
ExpiresOn string `json:"expiresOn"`
IdentityProvider string `json:"identityProvider"`
IsMRRT bool `json:"isMRRT"`
RefreshToken string `json:"refreshToken"`
Resource string `json:"resource"`
TokenType string `json:"tokenType"`
UserID string `json:"userId"`
}
// ToADALToken converts an Azure CLI `Token` to an `adal.Token`.
func (t Token) ToADALToken() (converted adal.Token, err error) {
tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
if err != nil {
err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
return
}
difference := tokenExpirationDate.Sub(date.UnixEpoch())
converted = adal.Token{
AccessToken: t.AccessToken,
Type: t.TokenType,
ExpiresIn: "3600",
ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))),
RefreshToken: t.RefreshToken,
Resource: t.Resource,
}
return
}
// AccessTokensPath returns the path where the Azure CLI stores access tokens.
// TODO(#199): add unit test.
func AccessTokensPath() (string, error) {
// The Azure CLI allows the user to customize the path of the access tokens through an environment variable.
var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
var err error
// Fallback logic to default path on non-cloud-shell environment.
// TODO(#200): remove the dependency on hard-coding path.
if accessTokenPath == "" {
accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
}
return accessTokenPath, err
}
// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
func ParseExpirationDate(input string) (*time.Time, error) {
// CloudShell (and potentially the Azure CLI in future)
expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
if cloudShellErr != nil {
// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
const cliFormat = "2006-01-02 15:04:05.999999"
expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
if cliErr == nil {
return &expirationDate, nil
}
return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
}
return &expirationDate, nil
}
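// For illustration, both of these inputs parse successfully (the second in the
// local timezone):
//
//	ParseExpirationDate("2017-08-31T19:48:57Z")       // CloudShell / RFC3339
//	ParseExpirationDate("2017-08-31 19:48:57.998857") // Azure CLI (Python)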
// LoadTokens restores a set of Token objects from a file located at 'path'.
func LoadTokens(path string) ([]Token, error) {
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
}
defer file.Close()
var tokens []Token
dec := json.NewDecoder(file)
if err = dec.Decode(&tokens); err != nil {
return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
}
return tokens, nil
}
// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
func GetTokenFromCLI(resource string) (*Token, error) {
// This is the path that a developer can set to tell this package what the install path for Azure CLI is.
const azureCLIPath = "AzureCLIPath"
// The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI.
azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles"))
// Default path for non-Windows.
const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin"
// Validate resource, since it gets sent as a command line argument to Azure CLI
const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
if err != nil {
return nil, err
}
if !match {
return nil, fmt.Errorf(invalidResourceErrorTemplate, resource)
}
// Execute Azure CLI to get token
var cliCmd *exec.Cmd
if runtime.GOOS == "windows" {
cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir")))
cliCmd.Env = os.Environ()
cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows))
cliCmd.Args = append(cliCmd.Args, "/c", "az")
} else {
cliCmd = exec.Command("az")
cliCmd.Env = os.Environ()
cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath))
}
cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource)
var stderr bytes.Buffer
cliCmd.Stderr = &stderr
output, err := cliCmd.Output()
if err != nil {
return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String())
}
tokenResponse := Token{}
err = json.Unmarshal(output, &tokenResponse)
if err != nil {
return nil, err
}
return &tokenResponse, err
}
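// Illustrative sketch: fetching a Resource Manager token via the CLI and
// converting it for use with autorest ("https://management.azure.com/" is the
// public-cloud ARM resource):
//
//	token, err := GetTokenFromCLI("https://management.azure.com/")
//	if err != nil {
//		return err
//	}
//	adalToken, err := token.ToADALToken()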

37
vendor/github.com/dimchansky/utfbom/.gitignore generated vendored Normal file
View File

@ -0,0 +1,37 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
*.o
*.a
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.prof
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
# Gogland
.idea/

18
vendor/github.com/dimchansky/utfbom/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,18 @@
language: go
go:
- '1.10'
- '1.11'
# sudo=false makes the build run using a container
sudo: false
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
- go get github.com/golang/lint/golint
script:
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. The following files have problems:\n $unformatted" && false)
- golint ./... # This won't break the build, just show warnings
- $HOME/gopath/bin/goveralls -service=travis-ci

201
vendor/github.com/dimchansky/utfbom/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

66
vendor/github.com/dimchansky/utfbom/README.md generated vendored Normal file
View File

@ -0,0 +1,66 @@
# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master)
The utfbom package detects the BOM (Unicode Byte Order Mark) and removes it as necessary. It can also return the encoding detected by the BOM.
## Installation
go get -u github.com/dimchansky/utfbom
## Example
```go
package main
import (
"bytes"
"fmt"
"io/ioutil"
"github.com/dimchansky/utfbom"
)
func main() {
trySkip([]byte("\xEF\xBB\xBFhello"))
trySkip([]byte("hello"))
}
func trySkip(byteData []byte) {
fmt.Println("Input:", byteData)
// just skip BOM
output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData)))
if err != nil {
fmt.Println(err)
return
}
fmt.Println("ReadAll with BOM skipping", output)
// skip BOM and detect encoding
sr, enc := utfbom.Skip(bytes.NewReader(byteData))
fmt.Printf("Detected encoding: %s\n", enc)
output, err = ioutil.ReadAll(sr)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("ReadAll with BOM detection and skipping", output)
fmt.Println()
}
```
Output:
```
$ go run main.go
Input: [239 187 191 104 101 108 108 111]
ReadAll with BOM skipping [104 101 108 108 111]
Detected encoding: UTF8
ReadAll with BOM detection and skipping [104 101 108 108 111]
Input: [104 101 108 108 111]
ReadAll with BOM skipping [104 101 108 108 111]
Detected encoding: Unknown
ReadAll with BOM detection and skipping [104 101 108 108 111]
```

1
vendor/github.com/dimchansky/utfbom/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/dimchansky/utfbom

192
vendor/github.com/dimchansky/utfbom/utfbom.go generated vendored Normal file
View File

@ -0,0 +1,192 @@
// Package utfbom implements detection of the BOM (Unicode Byte Order Mark) and its removal as necessary.
// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader
// interface but provides automatic BOM checking and removal as necessary.
package utfbom
import (
"errors"
"io"
)
// Encoding is the type for a detected UTF encoding.
type Encoding int
// Constants to identify detected UTF encodings.
const (
// Unknown encoding, returned when no BOM was detected
Unknown Encoding = iota
// UTF8, BOM bytes: EF BB BF
UTF8
// UTF-16, big-endian, BOM bytes: FE FF
UTF16BigEndian
// UTF-16, little-endian, BOM bytes: FF FE
UTF16LittleEndian
// UTF-32, big-endian, BOM bytes: 00 00 FE FF
UTF32BigEndian
// UTF-32, little-endian, BOM bytes: FF FE 00 00
UTF32LittleEndian
)
// String returns a user-friendly string representation of the encoding. Satisfies the fmt.Stringer interface.
func (e Encoding) String() string {
switch e {
case UTF8:
return "UTF8"
case UTF16BigEndian:
return "UTF16BigEndian"
case UTF16LittleEndian:
return "UTF16LittleEndian"
case UTF32BigEndian:
return "UTF32BigEndian"
case UTF32LittleEndian:
return "UTF32LittleEndian"
default:
return "Unknown"
}
}
const maxConsecutiveEmptyReads = 100
// Skip creates a Reader that automatically detects a BOM (Unicode Byte Order Mark) and removes it as necessary.
// It also returns the encoding detected by the BOM.
// If the detected encoding is not needed, you can call the SkipOnly function.
func Skip(rd io.Reader) (*Reader, Encoding) {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok {
return b, Unknown
}
enc, left, err := detectUtf(rd)
return &Reader{
rd: rd,
buf: left,
err: err,
}, enc
}
// SkipOnly creates a Reader that automatically detects a BOM (Unicode Byte Order Mark) and removes it as necessary.
func SkipOnly(rd io.Reader) *Reader {
r, _ := Skip(rd)
return r
}
// Reader implements automatic BOM (Unicode Byte Order Mark) checking and
// removing as necessary for an io.Reader object.
type Reader struct {
rd io.Reader // reader provided by the client
buf []byte // buffered data
err error // last error
}
// Read is an implementation of the io.Reader interface.
// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary.
func (r *Reader) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if r.buf == nil {
if r.err != nil {
return 0, r.readErr()
}
return r.rd.Read(p)
}
// copy as much as we can
n = copy(p, r.buf)
r.buf = nilIfEmpty(r.buf[n:])
return n, nil
}
func (r *Reader) readErr() error {
err := r.err
r.err = nil
return err
}
var errNegativeRead = errors.New("utfbom: reader returned negative count from Read")
func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) {
buf, err = readBOM(rd)
if len(buf) >= 4 {
if isUTF32BigEndianBOM4(buf) {
return UTF32BigEndian, nilIfEmpty(buf[4:]), err
}
if isUTF32LittleEndianBOM4(buf) {
return UTF32LittleEndian, nilIfEmpty(buf[4:]), err
}
}
if len(buf) > 2 && isUTF8BOM3(buf) {
return UTF8, nilIfEmpty(buf[3:]), err
}
if (err != nil && err != io.EOF) || (len(buf) < 2) {
return Unknown, nilIfEmpty(buf), err
}
if isUTF16BigEndianBOM2(buf) {
return UTF16BigEndian, nilIfEmpty(buf[2:]), err
}
if isUTF16LittleEndianBOM2(buf) {
return UTF16LittleEndian, nilIfEmpty(buf[2:]), err
}
return Unknown, nilIfEmpty(buf), err
}
func readBOM(rd io.Reader) (buf []byte, err error) {
const maxBOMSize = 4
var bom [maxBOMSize]byte // used to read BOM
// read as many bytes as possible
for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] {
if n, err = rd.Read(bom[len(buf):]); n < 0 {
panic(errNegativeRead)
}
if n > 0 {
nEmpty = 0
} else {
nEmpty++
if nEmpty >= maxConsecutiveEmptyReads {
err = io.ErrNoProgress
}
}
}
return
}
func isUTF32BigEndianBOM4(buf []byte) bool {
return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF
}
func isUTF32LittleEndianBOM4(buf []byte) bool {
return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00
}
func isUTF8BOM3(buf []byte) bool {
return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF
}
func isUTF16BigEndianBOM2(buf []byte) bool {
return buf[0] == 0xFE && buf[1] == 0xFF
}
func isUTF16LittleEndianBOM2(buf []byte) bool {
return buf[0] == 0xFF && buf[1] == 0xFE
}
func nilIfEmpty(buf []byte) (res []byte) {
if len(buf) > 0 {
res = buf
}
return
}

50
vendor/golang.org/x/crypto/pkcs12/bmp-string.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import (
"errors"
"unicode/utf16"
)
// bmpString returns s encoded in UCS-2 with a zero terminator.
func bmpString(s string) ([]byte, error) {
// References:
// https://tools.ietf.org/html/rfc7292#appendix-B.1
// https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
// - non-BMP characters are encoded in UTF-16 by using a surrogate pair of 16-bit codes
// EncodeRune returns 0xfffd if the rune does not need special encoding
// - the above RFC provides the info that BMPStrings are NULL terminated.
ret := make([]byte, 0, 2*len(s)+2)
for _, r := range s {
if t, _ := utf16.EncodeRune(r); t != 0xfffd {
return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
}
ret = append(ret, byte(r/256), byte(r%256))
}
return append(ret, 0, 0), nil
}
func decodeBMPString(bmpString []byte) (string, error) {
if len(bmpString)%2 != 0 {
return "", errors.New("pkcs12: odd-length BMP string")
}
// strip terminator if present
if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
bmpString = bmpString[:l-2]
}
s := make([]uint16, 0, len(bmpString)/2)
for len(bmpString) > 0 {
s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
bmpString = bmpString[2:]
}
return string(utf16.Decode(s)), nil
}
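// For illustration: bmpString("AB") yields 0x00 0x41 0x00 0x42 0x00 0x00
// (big-endian UCS-2 plus the two-byte zero terminator), and decodeBMPString
// reverses the mapping after stripping that terminator.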

131
vendor/golang.org/x/crypto/pkcs12/crypto.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import (
"bytes"
"crypto/cipher"
"crypto/des"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"golang.org/x/crypto/pkcs12/internal/rc2"
)
var (
oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
)
// pbeCipher is an abstraction of a PKCS#12 cipher.
type pbeCipher interface {
// create returns a cipher.Block given a key.
create(key []byte) (cipher.Block, error)
// deriveKey returns a key derived from the given password and salt.
deriveKey(salt, password []byte, iterations int) []byte
// deriveIV returns an IV derived from the given password and salt.
deriveIV(salt, password []byte, iterations int) []byte
}
type shaWithTripleDESCBC struct{}
func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
return des.NewTripleDESCipher(key)
}
func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
}
func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
}
type shaWith40BitRC2CBC struct{}
func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
return rc2.New(key, len(key)*8)
}
func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
}
func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
}
type pbeParams struct {
Salt []byte
Iterations int
}
func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
var cipherType pbeCipher
switch {
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
cipherType = shaWithTripleDESCBC{}
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
cipherType = shaWith40BitRC2CBC{}
default:
return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
}
var params pbeParams
if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
return nil, 0, err
}
key := cipherType.deriveKey(params.Salt, password, params.Iterations)
iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
block, err := cipherType.create(key)
if err != nil {
return nil, 0, err
}
return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
}
func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
if err != nil {
return nil, err
}
encrypted := info.Data()
if len(encrypted) == 0 {
return nil, errors.New("pkcs12: empty encrypted data")
}
if len(encrypted)%blockSize != 0 {
return nil, errors.New("pkcs12: input is not a multiple of the block size")
}
decrypted = make([]byte, len(encrypted))
cbc.CryptBlocks(decrypted, encrypted)
psLen := int(decrypted[len(decrypted)-1])
if psLen == 0 || psLen > blockSize {
return nil, ErrDecryption
}
if len(decrypted) < psLen {
return nil, ErrDecryption
}
ps := decrypted[len(decrypted)-psLen:]
decrypted = decrypted[:len(decrypted)-psLen]
if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
return nil, ErrDecryption
}
return
}
// decryptable abstracts an object that contains ciphertext.
type decryptable interface {
Algorithm() pkix.AlgorithmIdentifier
Data() []byte
}

23
vendor/golang.org/x/crypto/pkcs12/errors.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import "errors"
var (
// ErrDecryption represents a failure to decrypt the input.
ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
// ErrIncorrectPassword is returned when an incorrect password is detected.
// Usually, P12/PFX data is signed to be able to verify the password.
ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
)
// NotImplementedError indicates that the input is not currently supported.
type NotImplementedError string
func (e NotImplementedError) Error() string {
return "pkcs12: " + string(e)
}

271
vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rc2 implements the RC2 cipher
/*
https://www.ietf.org/rfc/rfc2268.txt
http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
This code is licensed under the MIT license.
*/
package rc2
import (
"crypto/cipher"
"encoding/binary"
)
// BlockSize is the RC2 block size in bytes.
const BlockSize = 8
type rc2Cipher struct {
k [64]uint16
}
// New returns a new rc2 cipher with the given key and effective key length t1
func New(key []byte, t1 int) (cipher.Block, error) {
// TODO(dgryski): error checking for key length
return &rc2Cipher{
k: expandKey(key, t1),
}, nil
}
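// Illustrative sketch: the returned cipher.Block is normally wrapped in a
// block mode; callers in the pkcs12 package use CBC (error handling elided):
//
//	block, _ := New(key, len(key)*8) // effective key length in bits
//	cbc := cipher.NewCBCDecrypter(block, iv)
//	cbc.CryptBlocks(dst, src)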
func (*rc2Cipher) BlockSize() int { return BlockSize }
var piTable = [256]byte{
0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
}
func expandKey(key []byte, t1 int) [64]uint16 {
l := make([]byte, 128)
copy(l, key)
var t = len(key)
var t8 = (t1 + 7) / 8
var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
for i := len(key); i < 128; i++ {
l[i] = piTable[l[i-1]+l[uint8(i-t)]]
}
l[128-t8] = piTable[l[128-t8]&tm]
for i := 127 - t8; i >= 0; i-- {
l[i] = piTable[l[i+1]^l[i+t8]]
}
var k [64]uint16
for i := range k {
k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
}
return k
}
func rotl16(x uint16, b uint) uint16 {
return (x >> (16 - b)) | (x << b)
}
func (c *rc2Cipher) Encrypt(dst, src []byte) {
r0 := binary.LittleEndian.Uint16(src[0:])
r1 := binary.LittleEndian.Uint16(src[2:])
r2 := binary.LittleEndian.Uint16(src[4:])
r3 := binary.LittleEndian.Uint16(src[6:])
var j int
for j <= 16 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
r0 = rotl16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
r1 = rotl16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
r2 = rotl16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
r3 = rotl16(r3, 5)
j++
}
r0 = r0 + c.k[r3&63]
r1 = r1 + c.k[r0&63]
r2 = r2 + c.k[r1&63]
r3 = r3 + c.k[r2&63]
for j <= 40 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
r0 = rotl16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
r1 = rotl16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
r2 = rotl16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
r3 = rotl16(r3, 5)
j++
}
r0 = r0 + c.k[r3&63]
r1 = r1 + c.k[r0&63]
r2 = r2 + c.k[r1&63]
r3 = r3 + c.k[r2&63]
for j <= 60 {
// mix r0
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
r0 = rotl16(r0, 1)
j++
// mix r1
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
r1 = rotl16(r1, 2)
j++
// mix r2
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
r2 = rotl16(r2, 3)
j++
// mix r3
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
r3 = rotl16(r3, 5)
j++
}
binary.LittleEndian.PutUint16(dst[0:], r0)
binary.LittleEndian.PutUint16(dst[2:], r1)
binary.LittleEndian.PutUint16(dst[4:], r2)
binary.LittleEndian.PutUint16(dst[6:], r3)
}
func (c *rc2Cipher) Decrypt(dst, src []byte) {
r0 := binary.LittleEndian.Uint16(src[0:])
r1 := binary.LittleEndian.Uint16(src[2:])
r2 := binary.LittleEndian.Uint16(src[4:])
r3 := binary.LittleEndian.Uint16(src[6:])
j := 63
for j >= 44 {
// unmix r3
r3 = rotl16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
r2 = rotl16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
r1 = rotl16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
r0 = rotl16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
}
r3 = r3 - c.k[r2&63]
r2 = r2 - c.k[r1&63]
r1 = r1 - c.k[r0&63]
r0 = r0 - c.k[r3&63]
for j >= 20 {
// unmix r3
r3 = rotl16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
r2 = rotl16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
r1 = rotl16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
r0 = rotl16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
}
r3 = r3 - c.k[r2&63]
r2 = r2 - c.k[r1&63]
r1 = r1 - c.k[r0&63]
r0 = r0 - c.k[r3&63]
for j >= 0 {
// unmix r3
r3 = rotl16(r3, 16-5)
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
j--
// unmix r2
r2 = rotl16(r2, 16-3)
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
j--
// unmix r1
r1 = rotl16(r1, 16-2)
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
j--
// unmix r0
r0 = rotl16(r0, 16-1)
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
j--
}
binary.LittleEndian.PutUint16(dst[0:], r0)
binary.LittleEndian.PutUint16(dst[2:], r1)
binary.LittleEndian.PutUint16(dst[4:], r2)
binary.LittleEndian.PutUint16(dst[6:], r3)
}

45
vendor/golang.org/x/crypto/pkcs12/mac.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import (
"crypto/hmac"
"crypto/sha1"
"crypto/x509/pkix"
"encoding/asn1"
)
type macData struct {
Mac digestInfo
MacSalt []byte
Iterations int `asn1:"optional,default:1"`
}
// from PKCS#7:
type digestInfo struct {
Algorithm pkix.AlgorithmIdentifier
Digest []byte
}
var (
oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
)
func verifyMac(macData *macData, message, password []byte) error {
if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
}
key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
mac := hmac.New(sha1.New, key)
mac.Write(message)
expectedMAC := mac.Sum(nil)
if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
return ErrIncorrectPassword
}
return nil
}

170
vendor/golang.org/x/crypto/pkcs12/pbkdf.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import (
"bytes"
"crypto/sha1"
"math/big"
)
var (
one = big.NewInt(1)
)
// sha1Sum returns the SHA-1 hash of in.
func sha1Sum(in []byte) []byte {
sum := sha1.Sum(in)
return sum[:]
}
// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
// repeats of pattern.
func fillWithRepeats(pattern []byte, v int) []byte {
if len(pattern) == 0 {
return nil
}
outputLen := v * ((len(pattern) + v - 1) / v)
return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
}
func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
// implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
// Let H be a hash function built around a compression function f:
// Z_2^u x Z_2^v -> Z_2^u
// (that is, H has a chaining variable and output of length u bits, and
// the message input to the compression function of H is v bits). The
// values for u and v are as follows:
// HASH FUNCTION VALUE u VALUE v
// MD2, MD5 128 512
// SHA-1 160 512
// SHA-224 224 512
// SHA-256 256 512
// SHA-384 384 1024
// SHA-512 512 1024
// SHA-512/224 224 1024
// SHA-512/256 256 1024
// Furthermore, let r be the iteration count.
// We assume here that u and v are both multiples of 8, as are the
// lengths of the password and salt strings (which we denote by p and s,
// respectively) and the number n of pseudorandom bits required. In
// addition, u and v are of course non-zero.
// For information on security considerations for MD5 [19], see [25] and
// [1], and on those for MD2, see [18].
// The following procedure can be used to produce pseudorandom bits for
// a particular "purpose" that is identified by a byte called "ID".
// This standard specifies 3 different values for the ID byte:
// 1. If ID=1, then the pseudorandom bits being produced are to be used
// as key material for performing encryption or decryption.
// 2. If ID=2, then the pseudorandom bits being produced are to be used
// as an IV (Initial Value) for encryption or decryption.
// 3. If ID=3, then the pseudorandom bits being produced are to be used
// as an integrity key for MACing.
// 1. Construct a string, D (the "diversifier"), by concatenating v/8
// copies of ID.
var D []byte
for i := 0; i < v; i++ {
D = append(D, ID)
}
// 2. Concatenate copies of the salt together to create a string S of
// length v(ceiling(s/v)) bits (the final copy of the salt may be
// truncated to create S). Note that if the salt is the empty
// string, then so is S.
S := fillWithRepeats(salt, v)
// 3. Concatenate copies of the password together to create a string P
// of length v(ceiling(p/v)) bits (the final copy of the password
// may be truncated to create P). Note that if the password is the
// empty string, then so is P.
P := fillWithRepeats(password, v)
// 4. Set I=S||P to be the concatenation of S and P.
I := append(S, P...)
// 5. Set c=ceiling(n/u).
c := (size + u - 1) / u
// 6. For i=1, 2, ..., c, do the following:
A := make([]byte, c*20)
var IjBuf []byte
for i := 0; i < c; i++ {
// A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I,
// H(H(H(... H(D||I))))
Ai := hash(append(D, I...))
for j := 1; j < r; j++ {
Ai = hash(Ai)
}
copy(A[i*20:], Ai[:])
if i < c-1 { // skip on last iteration
// B. Concatenate copies of Ai to create a string B of length v
// bits (the final copy of Ai may be truncated to create B).
var B []byte
for len(B) < v {
B = append(B, Ai[:]...)
}
B = B[:v]
// C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
// blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
// setting I_j=(I_j+B+1) mod 2^v for each j.
{
Bbi := new(big.Int).SetBytes(B)
Ij := new(big.Int)
for j := 0; j < len(I)/v; j++ {
Ij.SetBytes(I[j*v : (j+1)*v])
Ij.Add(Ij, Bbi)
Ij.Add(Ij, one)
Ijb := Ij.Bytes()
// We expect Ijb to be exactly v bytes,
// if it is longer or shorter we must
// adjust it accordingly.
if len(Ijb) > v {
Ijb = Ijb[len(Ijb)-v:]
}
if len(Ijb) < v {
if IjBuf == nil {
IjBuf = make([]byte, v)
}
bytesShort := v - len(Ijb)
for i := 0; i < bytesShort; i++ {
IjBuf[i] = 0
}
copy(IjBuf[bytesShort:], Ijb)
Ijb = IjBuf
}
copy(I[j*v:(j+1)*v], Ijb)
}
}
}
}
// 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
// bit string, A.
// 8. Use the first n bits of A as the output of this entire process.
return A[:size]
// If the above process is being used to generate a DES key, the process
// should be used to create 64 random bits, and the key's parity bits
// should be set after the 64 bits have been produced. Similar concerns
// hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
// similar keys with parity bits "built into them".
}
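Within this package, `pbkdf` is only ever called from mac.go. A hedged in-package sketch of that call shape (this wrapper is not part of the upstream API; u=20 and v=64 are SHA-1's output and block sizes in bytes, and ID=3 selects MAC-key material per RFC 7292):

``` go
package pkcs12

// deriveMACKey shows how verifyMac derives its 20-byte HMAC-SHA1 key.
// salt and iterations come from the parsed macData; password must
// already be BMP-encoded (UTF-16BE with a trailing zero code point).
func deriveMACKey(salt, password []byte, iterations int) []byte {
	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 3, 20)
}
```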

349
vendor/golang.org/x/crypto/pkcs12/pkcs12.go generated vendored Normal file
View File

@ -0,0 +1,349 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkcs12 implements some of PKCS#12.
//
// This implementation is distilled from https://tools.ietf.org/html/rfc7292
// and referenced documents. It is intended for decoding P12/PFX-stored
// certificates and keys for use with the crypto/tls package.
//
// This package is frozen. If it's missing functionality you need, consider
// an alternative like software.sslmate.com/src/go-pkcs12.
package pkcs12
import (
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"errors"
)
var (
oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
)
type pfxPdu struct {
Version int
AuthSafe contentInfo
MacData macData `asn1:"optional"`
}
type contentInfo struct {
ContentType asn1.ObjectIdentifier
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
}
type encryptedData struct {
Version int
EncryptedContentInfo encryptedContentInfo
}
type encryptedContentInfo struct {
ContentType asn1.ObjectIdentifier
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
EncryptedContent []byte `asn1:"tag:0,optional"`
}
func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
return i.ContentEncryptionAlgorithm
}
func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
type safeBag struct {
Id asn1.ObjectIdentifier
Value asn1.RawValue `asn1:"tag:0,explicit"`
Attributes []pkcs12Attribute `asn1:"set,optional"`
}
type pkcs12Attribute struct {
Id asn1.ObjectIdentifier
Value asn1.RawValue `asn1:"set"`
}
type encryptedPrivateKeyInfo struct {
AlgorithmIdentifier pkix.AlgorithmIdentifier
EncryptedData []byte
}
func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
return i.AlgorithmIdentifier
}
func (i encryptedPrivateKeyInfo) Data() []byte {
return i.EncryptedData
}
// PEM block types
const (
certificateType = "CERTIFICATE"
privateKeyType = "PRIVATE KEY"
)
// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
// trailing data after unmarshaling.
func unmarshal(in []byte, out interface{}) error {
trailing, err := asn1.Unmarshal(in, out)
if err != nil {
return err
}
if len(trailing) != 0 {
return errors.New("pkcs12: trailing data found")
}
return nil
}
// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
encodedPassword, err := bmpString(password)
if err != nil {
return nil, ErrIncorrectPassword
}
bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
if err != nil {
return nil, err
}
blocks := make([]*pem.Block, 0, len(bags))
for _, bag := range bags {
block, err := convertBag(&bag, encodedPassword)
if err != nil {
return nil, err
}
blocks = append(blocks, block)
}
return blocks, nil
}
func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
block := &pem.Block{
Headers: make(map[string]string),
}
for _, attribute := range bag.Attributes {
k, v, err := convertAttribute(&attribute)
if err != nil {
return nil, err
}
block.Headers[k] = v
}
switch {
case bag.Id.Equal(oidCertBag):
block.Type = certificateType
certsData, err := decodeCertBag(bag.Value.Bytes)
if err != nil {
return nil, err
}
block.Bytes = certsData
case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
block.Type = privateKeyType
key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
if err != nil {
return nil, err
}
switch key := key.(type) {
case *rsa.PrivateKey:
block.Bytes = x509.MarshalPKCS1PrivateKey(key)
case *ecdsa.PrivateKey:
block.Bytes, err = x509.MarshalECPrivateKey(key)
if err != nil {
return nil, err
}
default:
return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
}
default:
return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
}
return block, nil
}
func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
isString := false
switch {
case attribute.Id.Equal(oidFriendlyName):
key = "friendlyName"
isString = true
case attribute.Id.Equal(oidLocalKeyID):
key = "localKeyId"
case attribute.Id.Equal(oidMicrosoftCSPName):
// This key is chosen to match OpenSSL.
key = "Microsoft CSP Name"
isString = true
default:
return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
}
if isString {
if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
return "", "", err
}
if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
return "", "", err
}
} else {
var id []byte
if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
return "", "", err
}
value = hex.EncodeToString(id)
}
return key, value, nil
}
// Decode extracts a certificate and private key from pfxData. This function
// assumes that there is only one certificate and only one private key in the
// pfxData; if there are more use ToPEM instead.
func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
encodedPassword, err := bmpString(password)
if err != nil {
return nil, nil, err
}
bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
if err != nil {
return nil, nil, err
}
if len(bags) != 2 {
err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
return
}
for _, bag := range bags {
switch {
case bag.Id.Equal(oidCertBag):
if certificate != nil {
err = errors.New("pkcs12: expected exactly one certificate bag")
}
certsData, err := decodeCertBag(bag.Value.Bytes)
if err != nil {
return nil, nil, err
}
certs, err := x509.ParseCertificates(certsData)
if err != nil {
return nil, nil, err
}
if len(certs) != 1 {
err = errors.New("pkcs12: expected exactly one certificate in the certBag")
return nil, nil, err
}
certificate = certs[0]
case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
if privateKey != nil {
err = errors.New("pkcs12: expected exactly one key bag")
}
if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
return nil, nil, err
}
}
}
if certificate == nil {
return nil, nil, errors.New("pkcs12: certificate missing")
}
if privateKey == nil {
return nil, nil, errors.New("pkcs12: private key missing")
}
return
}
func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
pfx := new(pfxPdu)
if err := unmarshal(p12Data, pfx); err != nil {
return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
}
if pfx.Version != 3 {
return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
}
if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
return nil, nil, NotImplementedError("only password-protected PFX is implemented")
}
// unmarshal the explicit bytes in the content for type 'data'
if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
return nil, nil, err
}
if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
return nil, nil, errors.New("pkcs12: no MAC in data")
}
if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
// some implementations use an empty byte array
// for the empty-string password; try one more
// time with a nil password
password = nil
err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
}
if err != nil {
return nil, nil, err
}
}
var authenticatedSafe []contentInfo
if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
return nil, nil, err
}
if len(authenticatedSafe) != 2 {
return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
}
for _, ci := range authenticatedSafe {
var data []byte
switch {
case ci.ContentType.Equal(oidDataContentType):
if err := unmarshal(ci.Content.Bytes, &data); err != nil {
return nil, nil, err
}
case ci.ContentType.Equal(oidEncryptedDataContentType):
var encryptedData encryptedData
if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
return nil, nil, err
}
if encryptedData.Version != 0 {
return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
}
if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
return nil, nil, err
}
default:
return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
}
var safeContents []safeBag
if err := unmarshal(data, &safeContents); err != nil {
return nil, nil, err
}
bags = append(bags, safeContents...)
}
return bags, password, nil
}
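The exported surface of this package is essentially `Decode` and `ToPEM`. A hedged sketch of the typical consumer path, converting a PFX file into a `tls.Certificate` (the file name and password are placeholders):

``` go
package main

import (
	"crypto/tls"
	"encoding/pem"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12"
)

func main() {
	pfxData, err := ioutil.ReadFile("client.pfx") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// ToPEM handles PFX files with any number of bags; Decode would
	// insist on exactly one certificate and one private key.
	blocks, err := pkcs12.ToPEM(pfxData, "password") // placeholder password
	if err != nil {
		log.Fatal(err)
	}
	var pemBytes []byte
	for _, b := range blocks {
		pemBytes = append(pemBytes, pem.EncodeToMemory(b)...)
	}
	// X509KeyPair scans for the certificate and key blocks, so the
	// combined PEM can be passed for both arguments.
	cert, err := tls.X509KeyPair(pemBytes, pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded certificate chain with %d element(s)", len(cert.Certificate))
}
```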

57
vendor/golang.org/x/crypto/pkcs12/safebags.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs12
import (
"crypto/x509"
"encoding/asn1"
"errors"
)
var (
// see https://tools.ietf.org/html/rfc7292#appendix-D
oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
)
type certBag struct {
Id asn1.ObjectIdentifier
Data []byte `asn1:"tag:0,explicit"`
}
func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
pkinfo := new(encryptedPrivateKeyInfo)
if err = unmarshal(asn1Data, pkinfo); err != nil {
return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
}
pkData, err := pbDecrypt(pkinfo, password)
if err != nil {
return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
}
ret := new(asn1.RawValue)
if err = unmarshal(pkData, ret); err != nil {
return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
}
if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
}
return privateKey, nil
}
func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
bag := new(certBag)
if err := unmarshal(asn1Data, bag); err != nil {
return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
}
if !bag.Id.Equal(oidCertTypeX509Certificate) {
return nil, NotImplementedError("only X509 certificates are supported")
}
return bag.Data, nil
}

9
vendor/modules.txt vendored
View File

@ -11,8 +11,11 @@ contrib.go.opencensus.io/exporter/ocagent
# github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1
github.com/1and1/oneandone-cloudserver-sdk-go
# github.com/Azure/azure-sdk-for-go v30.0.0+incompatible
github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute
github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-01-01/network
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-02-01/resources
@ -23,6 +26,8 @@ github.com/Azure/azure-sdk-for-go/version
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/adal
github.com/Azure/go-autorest/autorest/azure
github.com/Azure/go-autorest/autorest/azure/auth
github.com/Azure/go-autorest/autorest/azure/cli
github.com/Azure/go-autorest/autorest/date
github.com/Azure/go-autorest/autorest/to
github.com/Azure/go-autorest/autorest/validation
@ -169,6 +174,8 @@ github.com/digitalocean/go-libvirt/internal/go-xdr/xdr2
github.com/digitalocean/go-qemu/qmp
# github.com/digitalocean/godo v1.11.1
github.com/digitalocean/godo
# github.com/dimchansky/utfbom v1.1.0
github.com/dimchansky/utfbom
# github.com/docker/docker v0.0.0-20180422163414-57142e89befe
github.com/docker/docker/pkg/namesgenerator
# github.com/dustin/go-humanize v1.0.0
@ -605,6 +612,8 @@ golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/chacha20
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/md4
golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/poly1305
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent

View File

@ -1,8 +1,8 @@
---
description: 'Packer supports building VHDs in Azure Resource manager.'
layout: docs
page_title: 'Azure - Builders'
sidebar_current: 'docs-builders-azure'
page_title: 'Azure arm - Builders'
sidebar_current: 'docs-builders-azure-arm'
---
# Azure Resource Manager Builder
@ -23,7 +23,7 @@ VM from your build artifact.
Azure uses a combination of OAuth and Active Directory to authorize requests to
the ARM API. Learn how to [authorize access to
ARM](/docs/builders/azure-setup.html).
ARM](/docs/builders/azure.html#authentication-for-azure).
The documentation below references command output from the [Azure
CLI](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/).
@ -36,12 +36,12 @@ addition to the options listed here, a
builder.
### Required options for authentication:
If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure-setup.html#managed-identities-for-azure-resources)
If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure.html#azure-managed-identity)
you don't need to specify any additional configuration options.
If you would like to use interactive user authentication, you should specify
`subscription_id` only. Packer will use cached credentials or redirect you
to a website to log in.
If you want to use a [service principal](/docs/builders/azure-setup.html#create-a-service-principal)
If you want to use a [service principal](/docs/builders/azure.html#azure-active-directory-service-principal)
you should specify `subscription_id`, `client_id` and one of `client_secret`,
`client_cert_path` or `client_jwt`.

View File

@ -0,0 +1,193 @@
---
description: |
The azure-chroot Packer builder is able to create Azure Managed Images leveraging
a VM in Azure.
layout: docs
page_title: 'Azure chroot - Builders'
sidebar_current: 'docs-builders-azure-chroot'
---
# Azure Builder (chroot)
Type: `azure-chroot`
The `azure-chroot` builder is able to build Azure managed disk (MD) images. For
more information on managed disks, see [Azure Managed Disks Overview](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/managed-disks-overview).
The difference between this builder and the `azure-arm` builder is that this
builder is able to build a managed disk image without launching a new Azure VM
for every build, but instead uses an already-running Azure VM. This can
dramatically speed up image builds. It also allows for more deterministic image
content and enables some capabilities that are not possible with the
`azure-arm` builder.
> **This is an advanced builder.** If you're just getting started with Packer,
it is recommended to start with the [azure-arm builder](/docs/builders/azure-arm.html),
which is much easier to use.
## How Does it Work?
This builder works by creating a new MD from either an existing source or from
scratch and attaching it to the (already existing) Azure VM where Packer is
running. Once attached, a [chroot](https://en.wikipedia.org/wiki/Chroot) is set
up and made available to the [provisioners](/docs/provisioners/index.html).
After provisioning, the MD is detached, snapshotted, and an MD image is created.
Using this process, minutes can be shaved off the image creation process
because Packer does not need to launch a VM instance.
There are some restrictions however:
* The host system must be similar to the image being built (generally the
same OS version, kernel version, etc.).
* If the source is a managed disk, it must be made available in the same
region as the host system.
* The host system SKU has to allow for all of the specified disks to be
attached.
## Configuration Reference
There are many configuration options available for the builder. We'll start
with authentication parameters, then go over the Azure chroot builder-specific
options.
### Authentication options
None of the authentication options are required, but depending on which
ones are specified, a different authentication method may be used. See the
[shared Azure builders documentation](/docs/builders/azure.html) for more
information.
<%= partial "partials/builder/azure/common/client/_Config-not-required.html" %>
### Azure chroot builder specific options
#### Required:
<%= partial "partials/builder/azure/chroot/_Config-required.html" %>
#### Optional:
<%= partial "partials/builder/azure/chroot/_Config-not-required.html" %>
## Chroot Mounts
The `chroot_mounts` configuration can be used to mount specific devices within
the chroot. By default, the following additional mounts are added into the
chroot by Packer:
- `/proc` (proc)
- `/sys` (sysfs)
- `/dev` (bind to real `/dev`)
- `/dev/pts` (devpts)
- `/proc/sys/fs/binfmt_misc` (binfmt\_misc)
These default mounts are usually good enough for most builds. However, if you
want to change or add the mount points, you may use the `chroot_mounts`
configuration. Here is an example configuration which only mounts `/proc` and
`/dev`:
``` json
{
"chroot_mounts": [
["proc", "proc", "/proc"],
["bind", "/dev", "/dev"]
]
}
```
`chroot_mounts` is a list of 3-tuples of strings. The three components of the
3-tuple, in order, are:
- The filesystem type. If this is "bind", then Packer will properly bind the
filesystem to another mount point.
- The source device.
- The mount directory.
## Additional template function
Because this builder runs on an Azure VM, there is an additional template function
available called `vm`, which returns the following VM metadata:
- name
- subscription_id
- resource_group
- location
- resource_id
This function can be used in configuration templates. For example, use
```
"{{ vm `subscription_id` }}"
```
to fill in the subscription ID of the VM in any of the configuration options.
## Examples
Here are some examples using this builder.
This builder requires privileged actions, such as mounting disks, running
`chroot` and other admin commands. Usually it needs to be run with root
permissions, for example:
```
sudo -E packer build example.json
```
### Using a VM with a Managed Identity
On a VM with a system-assigned managed identity that has the contributor role
on its own resource group, the following config can be used to create an
updated Debian image:
``` json
{
"builders": [{
"type": "azure-chroot",
"image_resource_id": "/subscriptions/{{vm `subscription_id`}}/resourceGroups/{{vm `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}",
"source": "credativ:Debian:9:latest"
}],
"provisioners": [{
"inline": [
"apt-get update",
"apt-get upgrade -y"
],
"inline_shebang": "/bin/sh -x",
"type": "shell"
}]
}
```
### Using a Service Principal
Here is an example that creates a Debian image with updated packages. Specify
all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`,
`ARM_SUBSCRIPTION_ID`) to use a service principal.
The identity you choose should have permission to create disks and images and also
to update your VM.
Set the `ARM_IMAGE_RESOURCEGROUP_ID` variable to an existing resource group in the
subscription where the resulting image will be created.
``` json
{
"variables": {
"client_id": "{{env `ARM_CLIENT_ID`}}",
"client_secret": "{{env `ARM_CLIENT_SECRET`}}",
"subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}",
"resource_group": "{{env `ARM_IMAGE_RESOURCEGROUP_ID`}}"
},
"builders": [{
"type": "azure-chroot",
"client_id": "{{user `client_id`}}",
"client_secret": "{{user `client_secret`}}",
"subscription_id": "{{user `subscription_id`}}",
"image_resource_id": "/subscriptions/{{user `subscription_id`}}/resourceGroups/{{user `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}",
"source": "credativ:Debian:9:latest"
}],
"provisioners": [{
"inline": [
"apt-get update",
"apt-get upgrade -y"
],
"inline_shebang": "/bin/sh -x",
"type": "shell"
}]
}
```

View File

@ -1,236 +0,0 @@
---
description: |
In order to build VMs in Azure, Packer needs various configuration options.
These options and how to obtain them are documented on this page.
layout: docs
page_title: 'Setup - Azure - Builders'
sidebar_current: 'docs-builders-azure-setup'
---
# Authorizing Packer Builds in Azure
In order to build VMs in Azure, Packer needs the following configuration options to be
specified:
- `subscription_id` - UUID identifying your Azure subscription (where billing
is handled)
- `client_id` - UUID identifying the Active Directory service principal that
will run your Packer builds
- `client_secret` - service principal secret / password
- `resource_group_name` - name of the resource group where your VHD(s) will
be stored
- `storage_account` - name of the storage account where your VHD(s) will be
stored
-> Behind the scenes, Packer uses the OAuth protocol to authenticate against
Azure Active Directory and authorize requests to the Azure Service Management
API. These topics are unnecessarily complicated, so we will try to ignore them
for the rest of this document.<br /><br />You do not need to understand how
OAuth works in order to use Packer with Azure, though the Active Directory
terms "service principal" and "role" will be useful for understanding Azure's
access policies.
In order to get all of the items above, you will need a username and password
for your Azure account.
## Device Login
Device login is an alternative way for Packer to authorize in Azure. Device login
only requires you to know your subscription ID. (Device login is only supported
for Linux-based VMs.) Device login is intended for first-time users who just
want to "kick the tires." We recommend the SPN approach if you intend to
automate Packer.
> Device login is for **interactive** builds, and SPN is for **automated** builds.
There are three pieces of information you must provide to enable device login
mode:
1. SubscriptionID
2. Resource Group - parent resource group that Packer uses to build an image.
3. Storage Account - storage account where the image will be placed.
> Device login mode is enabled by not setting client\_id and client\_secret.
> Device login mode is for the Public and US Gov clouds only.
The device login flow asks that you open a web browser, navigate to
<a href="http://aka.ms/devicelogin" class="uri">http://aka.ms/devicelogin</a>,
and input the supplied code. This authorizes the Packer for Azure application
to act on your behalf. An OAuth token will be created, and stored in the user's
home directory (\~/.azure/packer/oauth-TenantID.json). This token is used if
the token file exists, and it is refreshed as necessary. The token file
prevents the need to continually execute the device login flow. Packer will ask
for two device logins: one for the service management endpoint and another for
accessing the temporary key vault secrets that it creates.
## Managed identities for Azure resources
-> Managed identities for Azure resources is the new name for the service
formerly known as Managed Service Identity (MSI).
Managed identities are an alternative way for Packer to authorize in Azure. Managed
identities for Azure resources are automatically managed by Azure and enable
you to authenticate to services that support Azure AD authentication without
needing to insert credentials into your buildfile. Navigate to
<a href="https://docs.microsoft.com/en-gb/azure/active-directory/managed-identities-azure-resources/overview"
class="uri">managed identities azure resources overview</a> to learn more about
this feature.
This feature will be used when no `subscription_id`, `client_id` or
`client_secret` is set in your buildfile.
## Install the Azure CLI
To get the credentials above, we will need to install the Azure CLI. Please
refer to Microsoft's official [installation
guide](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/).
The guides below use [JMESPath](http://jmespath.org/) queries to select and reformat output from the AZ CLI commands. JMESPath is [part of the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/query-azure-cli?view=azure-cli-latest) and can be used in the same way as the `jq` tool.
## Guided Setup
The Packer project includes a [setup
script](https://github.com/hashicorp/packer/blob/master/contrib/azure-setup.sh)
that can help you set up your account. It is an interactive bash script that logs
you into Azure, names your resources, and exports your Packer configuration.
## Manual Setup
If you want more control, or the script does not work for you, you can also use
the manual instructions below to set up your Azure account. You will need to
manually keep track of the various account identifiers, resource names, and
your service principal password.
### Identify Your Tenant and Subscription IDs
Log in using the Azure CLI:
``` shell
$ az login
# Note, we have launched a browser for you to login. For old experience with device code, use "az login --use-device-code"
```
Once you've completed logging in, you should get a JSON array like the one
below:
``` shell
[
{
"cloudName": "AzureCloud",
"id": "$uuid",
"isDefault": false,
"name": "Pay-As-You-Go",
"state": "Enabled",
"tenantId": "$tenant_uuid",
"user": {
"name": "my_email@anywhere.com",
"type": "user"
}
}
]
```
Get your account information
``` shell
$ az account list --output table --query '[].{Name:name,subscription_id:id}'
$ az account set --subscription ACCOUNTNAME
$ az account show --output json --query 'id'
```
This will print out one line that looks like this:
4f562e88-8caf-421a-b4da-e3f6786c52ec
This is your `subscription_id`. Note it for later.
### Create a Resource Group
A [resource
group](https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/#resource-groups)
is used to organize related resources. Resource groups and storage accounts are
tied to a location. To see available locations, run:
``` shell
$ az account list-locations
$ LOCATION=xxx
$ GROUPNAME=xxx
# ...
$ az group create --name $GROUPNAME --location $LOCATION
```
Your storage account (below) will need to use the same `GROUPNAME` and
`LOCATION`.
### Create a Storage Account
We will need to create a storage account where your Packer artifacts will be
stored. We will create an `LRS` storage account, which has the least expensive
price/GB at the time of writing.
``` shell
$ az storage account create \
--name STORAGENAME \
--resource-group $GROUPNAME \
--location $LOCATION \
--sku Standard_LRS \
--kind Storage
```
-> `LRS` and `Standard_LRS` are meant as literal "LRS" or "Standard\_LRS"
and not as variables.
Make sure that `GROUPNAME` and `LOCATION` are the same as above. Also, ensure
that `STORAGENAME` is less than 24 characters long and contains only lowercase
letters and numbers.
### Create a Service Principal
A service principal acts on behalf of an application (Packer) on your Azure
subscription. To create an application and service principal for use with
Packer, run the below command specifying the subscription. This will grant
Packer the contributor role to the subscription.
The output of this command is your service principal credentials; save these in
a safe place, as you will need them to configure Packer.
``` shell
az ad sp create-for-rbac -n "Packer" --role contributor \
--scopes /subscriptions/{SubID}
```
The service principal credentials look like this:
``` shell
{
"appId": "AppId",
"displayName": "Packer",
"name": "http://Packer",
"password": "Password",
"tenant": "TenantId"
}
```
There are a lot of pre-defined roles and you can define your own with more
granular permissions, though this is out of scope. You can see a list of
pre-configured roles via:
``` shell
$ az role definition list --output table --query '[].{name:roleName, description:description}'
```
If you would rather use a certificate to authenticate your service principal,
please follow the [Azure Active Directory documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-certificate-credentials#register-your-certificate-with-azure-ad).
### Configuring Packer
Now (finally) everything has been set up in Azure and our service principal has
been created. You can use the output from creating your service principal in
your template. Use the value from the `appId` field above as a value for
`client_id` in your configuration and set `client_secret` to the `password`
value from above.

View File

@ -0,0 +1,101 @@
---
description: |
Packer is able to create Azure VM images. To achieve this, Packer comes with
multiple builders depending on the strategy you want to use to build the images.
layout: docs
page_title: 'Azure images - Builders'
sidebar_current: 'docs-builders-azure'
---
# Azure Virtual Machine Image Builders
Packer can create Azure virtual machine images in a variety of ways,
depending on the strategy that you want to use for building the images.
Packer supports the following builders for Azure images at the moment:
- [azure-arm](/docs/builders/azure-arm.html) - Uses Azure Resource
Manager (ARM) to launch a virtual machine (VM) from which a new image is
captured after provisioning. If in doubt, use this builder; it is the
easiest builder to get started with.
- [azure-chroot](/docs/builders/azure-chroot.html) - Uses ARM to create
a managed disk that is attached to an existing Azure VM that Packer is
running on. Provisioning leverages a [chroot](https://en.wikipedia.org/wiki/Chroot)
environment. After provisioning, the disk is detached and an image is created
from this disk. This is an **advanced builder and should not be used by
newcomers**. However, it is also the fastest way to build a VM image in
Azure.
-> **Don't know which builder to use?** If in doubt, use the [azure-arm
builder](/docs/builders/azure-arm.html). It is much easier to use.
# Authentication for Azure
The Packer Azure builders provide a couple of ways to authenticate to Azure. The
following methods are available and are explained below:
- Azure Active Directory interactive login. Interactive login is available
for the Public and US Gov clouds only.
- Azure Managed Identity
- Azure Active Directory Service Principal
-> **Don't know which authentication method to use?** Go with interactive
login to try out the builders. If you need Packer to run automatically,
switch to using a Service Principal or Managed Identity.
No matter which method you choose, the identity you use will need the
appropriate permissions on Azure resources for Packer to operate. The minimal
set of permissions is highly dependent on the builder and its configuration.
An easy way to get started is to assign the identity the `Contributor` role at
the subscription level.
## Azure Active Directory interactive login
If your organization allows it, you can use a command line interactive login
method based on OAuth 'device code flow'. Packer will select this method when
you only specify a `subscription_id` in your builder configuration. When you
run Packer, it will ask you to visit a web site and input a code. This web site
will then authenticate you, satisfying any two-factor authentication policies
that your organization might have. The tokens are cached under the `.azure/packer`
directory in your home directory and will be reused if they are still valid
on subsequent runs.
## Azure Managed Identity
Azure provides the option to assign an identity to a virtual machine ([Azure
documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)). Packer can
use a system assigned identity for a VM where Packer is running to orchestrate
Azure API's. This is the default behavior and requires no configuration
properties to be set. It does, however, require that you run Packer on an
Azure VM.
To enable this method, [let Azure assign a system-assigned identity to your VM](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
Then, [grant your VM access to the appropriate resources](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/howto-assign-access-portal).
To get started, try assigning the `Contributor` role at the subscription level to
your VM. Then, when you discover your exact scenario, scope the permissions
appropriately or isolate Packer builds in a separate subscription.
## Azure Active Directory Service Principal
Azure Active Directory models service accounts as 'Service Principal' (SP)
objects. An SP represents an application accessing your Azure resources. It
is identified by a client ID (aka application ID) and can use a password or a
certificate to authenticate. To use a Service Principal, specify the
`subscription_id` and `client_id`, as well as either `client_secret`,
`client_cert_path` or `client_jwt`. Each of these last three represents a different
way to authenticate the SP to AAD:
- `client_secret` - allows the user to provide a password/secret registered
for the AAD SP.
- `client_cert_path` - allows usage of a certificate to be used to
authenticate as the specified AAD SP.
- `client_jwt` - For advanced scenarios where the user cannot provide Packer
the full certificate, they can provide a JWT bearer token for client auth
(RFC 7523, Sec. 2.2). These bearer tokens are created and signed using a
certificate registered in AAD and have a user-chosen expiry time, limiting
the validity of the token. This is also the underlying mechanism used to
authenticate when using `client_cert_path`.
To create a service principal, you can follow [the Azure documentation on this
subject](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest).

View File

@ -94,8 +94,11 @@
<li<%= sidebar_current("docs-builders-azure") %>>
<a href="/docs/builders/azure.html">Azure</a>
<ul class="nav">
<li<%= sidebar_current("docs-builders-azure-setup") %>>
<a href="/docs/builders/azure-setup.html">Setup</a>
<li<%= sidebar_current("docs-builders-azure-arm") %>>
<a href="/docs/builders/azure-arm.html">ARM</a>
</li>
<li<%= sidebar_current("docs-builders-azure-chroot") %>>
<a href="/docs/builders/azure-chroot.html">chroot</a>
</li>
</ul>
</li>

View File

@ -7,7 +7,7 @@
/docs/command-line/machine-readable.html /docs/commands/index.html
/docs/command-line/introduction.html /docs/commands/index.html
/docs/templates/introduction.html /docs/templates/index.html
/docs/builders/azure-arm.html /docs/builders/azure.html
/docs/builders/azure-setup.html /docs/builders/azure.html
/docs/templates/veewee-to-packer.html /guides/veewee-to-packer.html
/docs/extend/developing-plugins.html /docs/extending/plugins.html
/docs/extending/developing-plugins.html /docs/extending/plugins.html

View File

@ -0,0 +1,50 @@
<!-- Code generated from the comments of the Config struct in builder/azure/chroot/builder.go; DO NOT EDIT MANUALLY -->
- `from_scratch` (bool) - When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`.
- `command_wrapper` (string) - How to run shell commands. This may be useful to set environment variables or perhaps run
a command with sudo, and so on. This is a configuration template where the `.Command` variable
is replaced with the command to be run. Defaults to `{{.Command}}`.
- `pre_mount_commands` ([]string) - A series of commands to execute after attaching the root volume and before mounting the chroot.
This is not required unless using `from_scratch`. If so, this should include any partitioning
and filesystem creation commands. The path to the device is provided by `{{.Device}}`.
- `mount_options` ([]string) - Options to supply the `mount` command when mounting devices. Each option will be prefixed with
`-o` and supplied to the `mount` command run by Packer. Because this command is run in a shell,
user discretion is advised. See the manual page for the `mount` command for valid filesystem-specific options.
- `mount_partition` (string) - The partition number containing the / partition. By default this is the first partition of the volume.
- `mount_path` (string) - The path where the volume will be mounted. This is where the chroot environment will be. This defaults
to `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template where the `.Device`
variable is replaced with the name of the device where the volume is attached.
- `post_mount_commands` ([]string) - As `pre_mount_commands`, but the commands are executed after mounting the root device and before the
extra mount and copy steps. The device and mount path are provided by `{{.Device}}` and `{{.MountPath}}`.
- `chroot_mounts` ([][]string) - This is a list of devices to mount into the chroot environment. This configuration parameter requires
some additional documentation which is in the "Chroot Mounts" section below. Please read that section
for more information on how to use this.
- `copy_files` ([]string) - Paths to files on the running Azure instance that will be copied into the chroot environment prior to
provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass an empty list to skip copying
`/etc/resolv.conf`. You may need to do this if you're building an image that uses systemd.
- `temporary_os_disk_name` (string) - The name of the temporary disk that will be created in the resource group of the VM that Packer is
running on. Will be generated if not set.
- `os_disk_size_gb` (int32) - Try to resize the OS disk to this size on the first copy. Disks can only be enlarged. If not specified,
the disk will keep its original size. Required when using `from_scratch`.
- `os_disk_storage_account_type` (string) - The [storage SKU](https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes)
to use for the OS Disk. Defaults to `Standard_LRS`.
- `os_disk_cache_type` (string) - The [cache type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#cachingtypes)
specified in the resulting image and for attaching it to the Packer VM. Defaults to `ReadOnly`.
- `os_disk_skip_cleanup` (bool) - If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false`.
- `image_hyperv_generation` (string) - The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes).
Defaults to `V1`.

View File

@ -0,0 +1,6 @@
<!-- Code generated from the comments of the Config struct in builder/azure/chroot/builder.go; DO NOT EDIT MANUALLY -->
- `source` (string) - Either a managed disk resource ID or a publisher:offer:sku:version specifier for platform image sources.
- `image_resource_id` (string) - The resource ID of the image to create using this build.

View File

@ -0,0 +1,3 @@
<!-- Code generated from the comments of the Config struct in builder/azure/chroot/builder.go; DO NOT EDIT MANUALLY -->
Config is the configuration that is chained through the steps and settable
from the template.