Create disk attacher component

This commit is contained in:
Paul Meyer 2019-04-25 23:12:18 +00:00
parent 3c33aa4fc5
commit 369ec9a84c
4 changed files with 268 additions and 95 deletions

View File

@ -0,0 +1,182 @@
package chroot
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
)
// VirtualMachinesClientAPI is the subset of the Azure virtual machines
// client that this package uses; declared as an interface, presumably so
// it can be substituted in tests — confirm against callers.
type VirtualMachinesClientAPI interface {
	// CreateOrUpdate creates or updates a virtual machine, returning a
	// future that callers wait on for completion.
	CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (
		result compute.VirtualMachinesCreateOrUpdateFuture, err error)
	// Get retrieves a virtual machine; expand selects the optional
	// instance-view expansion.
	Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (
		result compute.VirtualMachine, err error)
}
// DiskAttacher wraps the operations needed to attach and detach Azure
// managed data disks on the VM this process is running on.
type DiskAttacher interface {
	// AttachDisk attaches the managed disk with the given resource ID
	// and returns the LUN it is attached at.
	AttachDisk(ctx context.Context, disk string) (lun int32, err error)
	// DetachDisk detaches the managed disk with the given resource ID.
	DetachDisk(ctx context.Context, disk string) (err error)
	// WaitForDevice waits for the OS block device for the disk at LUN i
	// to appear and returns its device path.
	WaitForDevice(ctx context.Context, i int32) (device string, err error)
}
// NewDiskAttacher creates a DiskAttacher backed by the given Azure
// client set.
func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher {
	return diskAttacher{azureClient}
}
// diskAttacher is the default DiskAttacher implementation.
type diskAttacher struct {
	azcli client.AzureClientSet // client set used for all Azure API calls
}
// WaitForDevice polls until the OS block device for the data disk at the
// given LUN appears, returning its resolved device path (e.g. /dev/sdc).
// It watches /dev/disk/azure/scsi1/lun<i>, the symlink the Azure Linux
// agent creates for data disks on the first SCSI controller — assumes a
// Linux guest with that agent present. Polling stops when ctx is
// cancelled.
func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device string, err error) {
	path := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", i)

	for {
		link, err := os.Readlink(path)
		if err == nil {
			// The symlink target is relative; resolving it against the
			// link's directory yields the canonical device path.
			return filepath.Abs("/dev/disk/azure/scsi1/" + link)
		}
		// os.Readlink wraps failures in a *os.PathError, so comparing
		// err directly against the os.ErrNotExist sentinel never
		// matches and would abort the wait on the very first poll.
		// os.IsNotExist unwraps correctly: "not there yet" keeps
		// polling; any other error is fatal.
		if !os.IsNotExist(err) {
			return "", err
		}

		select {
		case <-time.After(100 * time.Millisecond):
			// device not present yet; poll again
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}
// DetachDisk removes the managed disk with the given resource ID from the
// current VM's data disk collection. It returns DiskNotFoundError when no
// attached disk matches diskID.
func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error {
	currentDisks, err := da.getDisks(ctx)
	if err != nil {
		return err
	}

	// Keep every disk except the one whose managed disk ID matches
	// diskID (case-insensitively; Azure resource IDs are not case-
	// sensitive). Disks without a ManagedDisk reference are always
	// kept: the previous filter dropped them too, which would have
	// silently detached unrelated disks.
	newDisks := make([]compute.DataDisk, 0, len(currentDisks))
	for _, disk := range currentDisks {
		if disk.ManagedDisk != nil &&
			strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
			continue
		}
		newDisks = append(newDisks, disk)
	}

	// Nothing was filtered out, so the disk was not attached.
	if len(currentDisks) == len(newDisks) {
		return DiskNotFoundError
	}

	return da.setDisks(ctx, newDisks)
}
// DiskNotFoundError is returned by DetachDisk when the given disk is not
// attached to this VM. (Idiomatic Go naming would be ErrDiskNotFound;
// the name is kept as-is for compatibility with existing callers.)
var DiskNotFoundError = errors.New("Disk not found")
// AttachDisk attaches the managed disk with the given resource ID to the
// current VM on the first free LUN and returns that LUN. If the disk is
// already attached (matched case-insensitively by resource ID), its
// existing LUN is returned and no update call is made.
func (da diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) {
	dataDisks, err := da.getDisks(ctx)
	if err != nil {
		return -1, err
	}

	// Check to see if the disk is already attached; remember its LUN if so.
	var lun int32 = -1
	for _, disk := range dataDisks {
		if disk.ManagedDisk != nil &&
			strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) {
			// disk is already attached, just take this lun
			if disk.Lun != nil {
				lun = to.Int32(disk.Lun)
				break
			}
		}
	}

	if lun == -1 {
		// Disk was not found on the VM; find a free LUN and attach it.
		// Azure exposes at most 64 LUNs (0-63) per VM.
		found := false
	findFreeLun:
		for lun = 0; lun < 64; lun++ {
			for _, v := range dataDisks {
				if to.Int32(v.Lun) == lun {
					continue findFreeLun
				}
			}
			// no data disk is using this lun
			found = true
			break
		}
		// Previously a fully-populated VM would fall through with
		// lun == 64 (out of range) and attempt the attach anyway;
		// fail explicitly instead.
		if !found {
			return -1, errors.New("no free LUN available on VM")
		}

		// append the new data disk to the collection
		dataDisks = append(dataDisks, compute.DataDisk{
			CreateOption: compute.DiskCreateOptionTypesAttach,
			ManagedDisk: &compute.ManagedDiskParameters{
				ID: to.StringPtr(diskID),
			},
			Lun: to.Int32Ptr(lun),
		})

		// update the VM resource with the new disk collection
		if err := da.setDisks(ctx, dataDisks); err != nil {
			return -1, err
		}
	}

	return lun, nil
}
// getThisVM retrieves the virtual machine resource that this process is
// running on. The VM's resource group and name are discovered through the
// instance metadata service; the call fails if the returned resource has
// no storage profile, which every caller relies on.
func (da diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) {
	// Discover which VM we are via instance metadata.
	info, err := da.azcli.MetadataClient().GetComputeInfo()
	if err != nil {
		return compute.VirtualMachine{}, err
	}

	// Fetch the full VM resource from ARM.
	thisVM, err := da.azcli.VirtualMachinesClient().Get(ctx, info.ResourceGroupName, info.Name, "")
	if err != nil {
		return compute.VirtualMachine{}, err
	}
	if thisVM.StorageProfile == nil {
		return compute.VirtualMachine{}, errors.New("properties.storageProfile is not set on VM, this is unexpected")
	}

	return thisVM, nil
}
// getDisks returns the data disks currently attached to this VM.
func (da diskAttacher) getDisks(ctx context.Context) ([]compute.DataDisk, error) {
	thisVM, err := da.getThisVM(ctx)
	if err != nil {
		return []compute.DataDisk{}, err
	}

	return *thisVM.StorageProfile.DataDisks, nil
}
// setDisks replaces the VM's data disk collection with disks and waits
// for the update operation to finish.
func (da diskAttacher) setDisks(ctx context.Context, disks []compute.DataDisk) error {
	thisVM, err := da.getThisVM(ctx)
	if err != nil {
		return err
	}

	id, err := azure.ParseResourceID(to.String(thisVM.ID))
	if err != nil {
		return err
	}

	thisVM.StorageProfile.DataDisks = &disks
	// Child resources are cleared before the update call; presumably
	// ARM rejects them on CreateOrUpdate — confirm against API docs.
	thisVM.Resources = nil

	// Submit the update and block until the long-running operation completes.
	future, err := da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, thisVM)
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, da.azcli.PollClient())
}

View File

@ -0,0 +1,86 @@
package chroot
import (
"context"
"github.com/Azure/go-autorest/autorest/to"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
"github.com/hashicorp/packer/builder/azure/chroot/client"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// Well-formed Azure resource IDs for use in tests. Note the
	// "/providers/Microsoft.Compute/" segment: the previous values
	// omitted it, which makes them unparsable as Azure resource IDs.
	testvm   = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup/providers/Microsoft.Compute/virtualMachines/testVM"
	testdisk = "/subscriptions/00000000-0000-0000-0000-000000000001/resourceGroups/testGroup2/providers/Microsoft.Compute/disks/testDisk"
)
// Tests assume current machine is capable of running chroot builder (i.e. an Azure VM)

// Test_DiskAttacherAttachesDiskToVM is a live integration test: it
// creates a managed disk in the VM's resource group, attaches it,
// waits for the OS device to appear, then detaches and deletes it.
// Each phase uses require/assert so a failure stops before Azure
// resources are left in an inconsistent state.
func Test_DiskAttacherAttachesDiskToVM(t *testing.T) {
	azcli, err := client.GetTestClientSet(t)
	require.Nil(t, err)
	da := NewDiskAttacher(azcli)
	testDiskName := t.Name()

	// Discover the VM we are running on; the disk is created alongside it.
	vm, err := azcli.MetadataClient().GetComputeInfo()
	require.Nil(t, err, "Test needs to run on an Azure VM, unable to retrieve VM information")
	t.Log("Creating new disk '", testDiskName, "' in ", vm.ResourceGroupName)

	// Clean up any disk left over from a previous (failed) run:
	// detach it first if it is still attached, then delete it.
	disk, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName)
	if err == nil {
		t.Log("Disk already exists")
		if disk.DiskState == compute.Attached {
			t.Log("Disk is attached, assuming to this machine, trying to detach")
			err = da.DetachDisk(context.TODO(), to.String(disk.ID))
			require.Nil(t, err)
		}
		t.Log("Deleting disk")
		result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName)
		require.Nil(t, err)
		err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient())
		require.Nil(t, err)
	}

	// Create a small (30 GB) empty standard disk and wait for completion.
	t.Log("Creating disk")
	r, err := azcli.DisksClient().CreateOrUpdate(context.TODO(), vm.ResourceGroupName, testDiskName, compute.Disk{
		Location: to.StringPtr(vm.Location),
		Sku: &compute.DiskSku{
			Name: compute.StandardLRS,
		},
		DiskProperties: &compute.DiskProperties{
			DiskSizeGB:   to.Int32Ptr(30),
			CreationData: &compute.CreationData{CreateOption: compute.Empty},
		},
	})
	require.Nil(t, err)
	err = r.WaitForCompletionRef(context.TODO(), azcli.PollClient())
	require.Nil(t, err)

	t.Log("Retrieving disk properties")
	d, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName)
	require.Nil(t, err)
	assert.NotNil(t, d)

	// Exercise the attacher: attach, wait for the OS device, detach.
	t.Log("Attaching disk")
	lun, err := da.AttachDisk(context.TODO(), to.String(d.ID))
	assert.Nil(t, err)

	t.Log("Waiting for device")
	dev, err := da.WaitForDevice(context.TODO(), lun)
	assert.Nil(t, err)
	t.Log("Device path:", dev)

	t.Log("Detaching disk")
	err = da.DetachDisk(context.TODO(), to.String(d.ID))
	require.Nil(t, err)

	// Final cleanup: delete the disk we created.
	t.Log("Deleting disk")
	result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName)
	if err == nil {
		err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient())
	}
	require.Nil(t, err)
}

View File

@ -1,71 +0,0 @@
package chroot
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/hashicorp/go-retryablehttp"
)
// DefaultMetadataClient is the default instance metadata client for Azure.
// Replace this variable for testing purposes only.
var DefaultMetadataClient = NewMetadataClient()
// MetadataClient holds methods that Packer uses to get information about
// the current VM.
type MetadataClient interface {
	// VMResourceID returns the Azure resource ID of the current VM.
	VMResourceID() (string, error)
}
// metadataClient implements MetadataClient.
type metadataClient struct{}

// imdsURL is the Azure instance metadata service (IMDS) endpoint; it is
// only reachable from inside an Azure VM.
const imdsURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
// VMResourceID returns the resource ID of the current VM
// VMResourceID returns the resource ID of the current VM, built from
// the subscription, resource group, and VM name reported by the Azure
// instance metadata service (IMDS).
func (metadataClient) VMResourceID() (string, error) {
	wc := retryablehttp.NewClient()
	wc.RetryMax = 5

	req, err := retryablehttp.NewRequest(http.MethodGet, imdsURL, nil)
	if err != nil {
		return "", err
	}
	// IMDS requires this header on every request.
	req.Header.Add("Metadata", "true")

	res, err := wc.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	// A non-200 response would previously have been parsed as JSON and
	// produced a confusing unmarshal error; fail with the status instead.
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status code from IMDS: %d", res.StatusCode)
	}

	d, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", err
	}

	var vminfo struct {
		Compute struct {
			Name              string
			ResourceGroupName string
			SubscriptionID    string
		}
	}
	if err := json.Unmarshal(d, &vminfo); err != nil {
		return "", err
	}

	// Arguments must match the format placeholders in order:
	// subscription ID, resource group, VM name. (They were previously
	// passed as Name, ResourceGroupName, SubscriptionID, producing an
	// invalid resource ID.)
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s",
		vminfo.Compute.SubscriptionID,
		vminfo.Compute.ResourceGroupName,
		vminfo.Compute.Name,
	), nil
}
// NewMetadataClient creates a new instance metadata client.
func NewMetadataClient() MetadataClient {
	var c metadataClient
	return c
}

View File

@ -1,24 +0,0 @@
package chroot
import (
"testing"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/hashicorp/packer/builder/azure/common"
"github.com/stretchr/testify/assert"
)
// Test_MetadataReturnsVMResourceID is a live test against the Azure
// instance metadata service; it is skipped when not running on Azure.
func Test_MetadataReturnsVMResourceID(t *testing.T) {
	if !common.IsAzure() {
		t.Skipf("Not running on Azure, skipping live IMDS test")
	}
	mdc := NewMetadataClient()
	id, err := mdc.VMResourceID()
	assert.Nil(t, err)
	assert.NotEqual(t, id, "", "Expected VMResourceID to return non-empty string because we are running on Azure")
	// The returned ID must be a well-formed Azure resource ID.
	vm, err := azure.ParseResourceID(id)
	assert.Nil(t, err, "%q is not parsable as an Azure resource id", id)
	t.Logf("VM: %+v", vm)
}