Make from_scratch work

Paul Meyer 2019-06-03 05:27:33 +00:00
parent b9b5bb2951
commit b5401d552a
5 changed files with 191 additions and 110 deletions


@@ -4,8 +4,10 @@ import (
"context"
"errors"
"fmt"
"github.com/Azure/go-autorest/autorest/azure"
"log"
"runtime"
"strings"
"github.com/hashicorp/packer/builder/amazon/chroot"
azcommon "github.com/hashicorp/packer/builder/azure/common"
@@ -35,16 +37,22 @@ type Config struct {
ChrootMounts [][]string `mapstructure:"chroot_mounts"`
CopyFiles []string `mapstructure:"copy_files"`
TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"`
OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"`
OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"`
OSDiskCacheType string `mapstructure:"os_disk_cache_type"`
ImageResourceID string `mapstructure:"image_resource_id"`
ImageOSState string `mapstructure:"image_os_state"`
ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"`
ctx interpolate.Context
}
func (c *Config) GetContext() interpolate.Context {
return c.ctx
}
type Builder struct {
config Config
runner multistep.Runner
@@ -106,6 +114,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.MountPartition = "1"
}
if b.config.TemporaryOSDiskName == "" {
b.config.TemporaryOSDiskName = "PackerTemp-{{timestamp}}"
}
if b.config.OSDiskStorageAccountType == "" {
b.config.OSDiskStorageAccountType = string(compute.PremiumLRS)
}
@@ -116,7 +128,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
if b.config.ImageOSState == "" {
b.config.ImageOSState = string(compute.Generalized)
}
if b.config.ImageHyperVGeneration == "" {
b.config.ImageHyperVGeneration = string(compute.V1)
}
// checks, accumulate any errors or warnings
@@ -136,16 +151,34 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
errs = packer.MultiErrorAppend(errs, errors.New("only 'from_scratch'=true is supported right now"))
}
if err := checkOSState(b.config.ImageOSState); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err))
}
if err := checkDiskCacheType(b.config.OSDiskCacheType); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_cache_type: %v", err))
}
if err := checkStorageAccountType(b.config.OSDiskStorageAccountType); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_storage_account_type: %v", err))
}
if b.config.ImageResourceID == "" {
errs = packer.MultiErrorAppend(errs, errors.New("image_resource_id is required"))
} else {
r, err := azure.ParseResourceID(b.config.ImageResourceID)
if err != nil ||
!strings.EqualFold(r.Provider, "Microsoft.Compute") ||
!strings.EqualFold(r.ResourceType, "images") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf(
"image_resource_id: %q is not a valid image resource id", b.config.ImageResourceID))
}
}
if err := checkOSState(b.config.ImageOSState); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err))
}
if err := checkHyperVGeneration(b.config.ImageHyperVGeneration); err != nil {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_hyperv_generation: %v", err))
}
if errs != nil {
return warns, errs
}
@@ -160,7 +193,7 @@ func checkOSState(s string) interface{} {
return nil
}
}
return fmt.Errorf("%q is not a valid value (%v)",
return fmt.Errorf("%q is not a valid value %v",
s, compute.PossibleOperatingSystemStateTypesValues())
}
@@ -170,7 +203,7 @@ func checkDiskCacheType(s string) interface{} {
return nil
}
}
return fmt.Errorf("%q is not a valid value (%v)",
return fmt.Errorf("%q is not a valid value %v",
s, compute.PossibleCachingTypesValues())
}
@@ -180,10 +213,20 @@ func checkStorageAccountType(s string) interface{} {
return nil
}
}
return fmt.Errorf("%q is not a valid value (%v)",
return fmt.Errorf("%q is not a valid value %v",
s, compute.PossibleDiskStorageAccountTypesValues())
}
func checkHyperVGeneration(s string) interface{} {
for _, v := range compute.PossibleHyperVGenerationValues() {
if compute.HyperVGeneration(s) == v {
return nil
}
}
return fmt.Errorf("%q is not a valid value %v",
s, compute.PossibleHyperVGenerationValues())
}
func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
if runtime.GOOS != "linux" {
return nil, errors.New("the azure-chroot builder only works on Linux environments")
@@ -223,8 +266,6 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
return nil, err
}
osDiskName := "PackerBuiltOsDisk"
state.Put("instance", info)
// Build the steps
@@ -238,9 +279,11 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
&StepCreateNewDisk{
SubscriptionID: info.SubscriptionID,
ResourceGroup: info.ResourceGroupName,
DiskName: osDiskName,
DiskName: b.config.TemporaryOSDiskName,
DiskSizeGB: b.config.OSDiskSizeGB,
DiskStorageAccountType: b.config.OSDiskStorageAccountType,
HyperVGeneration: b.config.ImageHyperVGeneration,
Location: info.Location,
})
}
@@ -270,6 +313,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
ImageOSState: b.config.ImageOSState,
OSDiskCacheType: b.config.OSDiskCacheType,
OSDiskStorageAccountType: b.config.OSDiskStorageAccountType,
Location: info.Location,
},
)

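For context on the new image_resource_id validation above: it leans on go-autorest's resource-ID parser and only accepts Microsoft.Compute/images resources. Below is a minimal standalone sketch of that check; the resource ID, group and image names are made up for illustration and are not taken from this commit.

package main

import (
	"fmt"
	"strings"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical image resource ID, for illustration only.
	id := "/subscriptions/00000000-0000-0000-0000-000000000000" +
		"/resourceGroups/packer-rg/providers/Microsoft.Compute/images/my-image"

	r, err := azure.ParseResourceID(id)
	if err != nil ||
		!strings.EqualFold(r.Provider, "Microsoft.Compute") ||
		!strings.EqualFold(r.ResourceType, "images") {
		fmt.Printf("%q is not a valid image resource id\n", id)
		return
	}

	// A well-formed ID yields the parts the later steps need.
	fmt.Println("resource group:", r.ResourceGroup) // packer-rg
	fmt.Println("image name:", r.ResourceName)      // my-image
}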

@@ -4,9 +4,11 @@ import (
"context"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/hashicorp/packer/builder/azure/common/client"
@@ -16,38 +18,43 @@ import (
"github.com/Azure/go-autorest/autorest/to"
)
type VirtualMachinesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (
result compute.VirtualMachinesCreateOrUpdateFuture, err error)
Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (
result compute.VirtualMachine, err error)
}
type DiskAttacher interface {
AttachDisk(ctx context.Context, disk string) (lun int32, err error)
DetachDisk(ctx context.Context, disk string) (err error)
WaitForDevice(ctx context.Context, i int32) (device string, err error)
DiskPathForLun(lun int32) string
}
func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher {
return diskAttacher{azureClient}
return &diskAttacher{
azcli: azureClient,
}
}
type diskAttacher struct {
azcli client.AzureClientSet
vm *client.ComputeInfo // store info about this VM so that we don't have to ask metadata service on every call
}
func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device string, err error) {
path := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", i)
func (diskAttacher) DiskPathForLun(lun int32) string {
return fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", lun)
}
func (da diskAttacher) WaitForDevice(ctx context.Context, lun int32) (device string, err error) {
path := da.DiskPathForLun(lun)
for {
l, err := os.Readlink(path)
link, err := os.Readlink(path)
if err == nil {
return filepath.Abs("/dev/disk/azure/scsi1/" + l)
return filepath.Abs("/dev/disk/azure/scsi1/" + link)
}
if err != nil && err != os.ErrNotExist {
if pe, ok := err.(*os.PathError); ok && pe.Err != syscall.ENOENT {
return "", err
}
}
select {
case <-time.After(100 * time.Millisecond):
// continue
@@ -57,13 +64,14 @@ func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device string, err error) {
}
}
func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error {
func (da *diskAttacher) DetachDisk(ctx context.Context, diskID string) error {
log.Println("Fetching list of disks currently attached to VM")
currentDisks, err := da.getDisks(ctx)
if err != nil {
return err
}
// copy all disks that do not match diskID into a new slice
log.Printf("Removing %q from list of disks currently attached to VM", diskID)
newDisks := []compute.DataDisk{}
for _, disk := range currentDisks {
if disk.ManagedDisk != nil &&
@@ -75,34 +83,52 @@ func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error {
return DiskNotFoundError
}
return da.setDisks(ctx, newDisks)
log.Println("Updating new list of disks attached to VM")
err = da.setDisks(ctx, newDisks)
if err != nil {
return err
}
// waiting for the VM update to finish takes way too long
for { // loop until disk is not attached, timeout or error
list, err := da.getDisks(ctx)
if err != nil {
return err
}
if findDiskInList(list, diskID) == nil {
log.Println("Disk is no longer in VM model, assuming detached")
return nil
}
select {
case <-time.After(time.Second): //continue
case <-ctx.Done():
return ctx.Err()
}
}
}
var DiskNotFoundError = errors.New("Disk not found")
func (da diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) {
func (da *diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) {
dataDisks, err := da.getDisks(ctx)
if err != nil {
return -1, err
}
// check to see if disk is already attached, remember lun if found
var lun int32 = -1
for _, disk := range dataDisks {
if disk.ManagedDisk != nil &&
strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
if disk := findDiskInList(dataDisks, diskID); disk != nil {
// disk is already attached, just take this lun
if disk.Lun != nil {
lun = to.Int32(disk.Lun)
break
}
if disk.Lun == nil {
return -1, errors.New("disk is attached, but lun was not set in VM model (possibly an error in the Azure APIs)")
}
return to.Int32(disk.Lun), nil
}
if lun == -1 {
// disk was not found on VM, go and actually attach it
findFreeLun:
var lun int32 = -1
findFreeLun:
for lun = 0; lun < 64; lun++ {
for _, v := range dataDisks {
if to.Int32(v.Lun) == lun {
@@ -127,19 +153,22 @@ func (da diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) {
if err != nil {
return -1, err
}
}
return lun, nil
}
func (da diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) {
func (da *diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) {
// getting resource info for this VM
if da.vm == nil {
vm, err := da.azcli.MetadataClient().GetComputeInfo()
if err != nil {
return compute.VirtualMachine{}, err
}
da.vm = vm
}
// retrieve actual VM
vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, vm.ResourceGroupName, vm.Name, "")
vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, da.vm.ResourceGroupName, da.vm.Name, "")
if err != nil {
return compute.VirtualMachine{}, err
}
@@ -173,10 +202,18 @@ func (da diskAttacher) setDisks(ctx context.Context, disks []compute.DataDisk) error {
vmResource.StorageProfile.DataDisks = &disks
vmResource.Resources = nil
// update the VM resource, attaching disk
f, err := da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource)
if err == nil {
err = f.WaitForCompletionRef(ctx, da.azcli.PollClient())
}
// update the VM resource, attach disk
_, err = da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource)
return err
}
func findDiskInList(list []compute.DataDisk, diskID string) *compute.DataDisk {
for _, disk := range list {
if disk.ManagedDisk != nil &&
strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
return &disk
}
}
return nil
}

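For orientation, here is a hypothetical sketch of how the DiskAttacher interface above is meant to be driven: attach, wait for the Linux device node, provision, then detach. It assumes it sits in the same package as the code above (so the "context" and client imports are already present); attachAndProvision, azcli and diskResourceID are illustrative names, and the real control flow lives in the steps below.

// attachAndProvision is an illustrative helper, not part of this commit:
// attach the managed disk, wait for the device node, hand back a detach hook.
func attachAndProvision(ctx context.Context, azcli client.AzureClientSet, diskResourceID string) (string, func() error, error) {
	da := NewDiskAttacher(azcli)

	lun, err := da.AttachDisk(ctx, diskResourceID) // adds the disk to this VM's data disks
	if err != nil {
		return "", nil, err
	}

	device, err := da.WaitForDevice(ctx, lun) // polls /dev/disk/azure/scsi1/lun<N> until the link appears
	if err != nil {
		return "", nil, err
	}

	// The caller mounts `device`, provisions into it, then runs the returned hook.
	detach := func() error { return da.DetachDisk(ctx, diskResourceID) }
	return device, detach, nil
}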

@@ -14,9 +14,10 @@ import (
var _ multistep.Step = &StepAttachDisk{}
type StepAttachDisk struct {
attached bool
}
func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
func (s *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
@@ -38,14 +39,23 @@ func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ctx, cancel := context.WithTimeout(ctx, time.Minute*3) // in case something is not configured correctly
defer cancel()
device, err := da.WaitForDevice(ctx, lun)
if err != nil {
log.Printf("StepAttachDisk.Run: error: %+v", err)
err := fmt.Errorf(
"error attaching disk '%s': %v", diskResourceID, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
ui.Say(fmt.Sprintf("Disk available at %q", device))
s.attached = true
state.Put("device", device)
state.Put("attach_cleanup", s)
return multistep.ActionContinue
}
func (s StepAttachDisk) Cleanup(state multistep.StateBag) {
func (s *StepAttachDisk) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
if err := s.CleanupFunc(state); err != nil {
ui.Error(err.Error())
@@ -53,6 +63,8 @@ func (s StepAttachDisk) Cleanup(state multistep.StateBag) {
}
func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error {
if s.attached {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id").(string)
@@ -64,5 +76,8 @@ func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error {
if err != nil {
return fmt.Errorf("error detaching %q: %v", diskResourceID, err)
}
s.attached = false
}
return nil
}

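StepAttachDisk now registers itself in the state bag under "attach_cleanup", so the disk can be detached before the image is captured rather than only at the end of the run. A hypothetical consumer of that key could look like the sketch below; the builder presumably reuses a shared early-cleanup step for this, so cleanupHook and runEarlyCleanup are illustrative names that only demonstrate the state-bag contract.

// Illustrative only: run the cleanup hook stored under "attach_cleanup"
// ahead of imaging. The interface shape matches CleanupFunc above.
type cleanupHook interface {
	CleanupFunc(multistep.StateBag) error
}

func runEarlyCleanup(state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	if v, ok := state.GetOk("attach_cleanup"); ok {
		if err := v.(cleanupHook).CleanupFunc(state); err != nil {
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}
	return multistep.ActionContinue
}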

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
@@ -18,6 +19,7 @@ type StepCreateImage struct {
ImageOSState string
OSDiskStorageAccountType string
OSDiskCacheType string
Location string
imageResource azure.Resource
}
@@ -44,16 +46,14 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
}
image := compute.Image{
Location: to.StringPtr(s.Location),
ImageProperties: &compute.ImageProperties{
StorageProfile: &compute.ImageStorageProfile{
OsDisk: &compute.ImageOSDisk{
OsType: "Linux",
OsState: compute.OperatingSystemStateTypes(s.ImageOSState),
ManagedDisk: &compute.SubResource{
ID: &diskResourceID,
},
Caching: compute.CachingTypes(s.OSDiskCacheType),
StorageAccountType: compute.StorageAccountTypes(s.OSDiskStorageAccountType),
},
// DataDisks: nil,
// ZoneResilient: nil,
@@ -67,6 +67,7 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
s.imageResource.ResourceName,
image)
if err == nil {
log.Println("Image creation in process...")
err = f.WaitForCompletionRef(ctx, azcli.PollClient())
}
if err != nil {
@@ -77,25 +78,9 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui.Error(err.Error())
return multistep.ActionHalt
}
log.Printf("Image creation complete: %s", f.Status())
return multistep.ActionContinue
}
func (s *StepCreateImage) Cleanup(state multistep.StateBag) {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
ctx := context.Background()
f, err := azcli.ImagesClient().Delete(
ctx,
s.imageResource.ResourceGroup,
s.imageResource.ResourceName)
if err == nil {
err = f.WaitForCompletionRef(ctx, azcli.PollClient())
}
if err != nil {
log.Printf("StepCreateImage.Cleanup: error: %+v", err)
ui.Error(fmt.Sprintf(
"error deleting image '%s': %v", s.ImageResourceID, err))
}
}
func (*StepCreateImage) Cleanup(bag multistep.StateBag) {} // this is the final artifact, don't delete


@@ -18,6 +18,8 @@ type StepCreateNewDisk struct {
SubscriptionID, ResourceGroup, DiskName string
DiskSizeGB int32 // optional, ignored if 0
DiskStorageAccountType string // from compute.DiskStorageAccountTypes
HyperVGeneration string
Location string
}
func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
@@ -32,13 +34,14 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
ui.Say(fmt.Sprintf("Creating disk '%s'", diskResourceID))
disk := compute.Disk{
Location: to.StringPtr(s.Location),
Sku: &compute.DiskSku{
Name: compute.DiskStorageAccountTypes(s.DiskStorageAccountType),
},
//Zones: nil,
DiskProperties: &compute.DiskProperties{
OsType: "",
HyperVGeneration: "",
OsType: "Linux",
HyperVGeneration: compute.HyperVGeneration(s.HyperVGeneration),
CreationData: &compute.CreationData{
CreateOption: compute.Empty,
},
@@ -70,11 +73,8 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) {
azcli := state.Get("azureclient").(client.AzureClientSet)
ui := state.Get("ui").(packer.Ui)
diskResourceID := state.Get("os_disk_resource_id")
diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
s.SubscriptionID,
s.ResourceGroup,
s.DiskName)
ui.Say(fmt.Sprintf("Deleting disk '%s'", diskResourceID))
f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName)
@@ -83,6 +83,6 @@ func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) {
}
if err != nil {
log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err)
ui.Error(fmt.Sprintf("Error deleting new disk '%s': %v.", diskResourceID, err))
ui.Error(fmt.Sprintf("error deleting new disk '%s': %v.", diskResourceID, err))
}
}