From 23c320d59e503df08adca4df26aff69681578934 Mon Sep 17 00:00:00 2001 From: larohra Date: Mon, 30 Sep 2019 16:17:22 -0700 Subject: [PATCH 01/55] Increased the default polling duration to 60mins --- builder/azure/arm/azure_client.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/builder/azure/arm/azure_client.go b/builder/azure/arm/azure_client.go index ab4330e37..130eba2a0 100644 --- a/builder/azure/arm/azure_client.go +++ b/builder/azure/arm/azure_client.go @@ -139,72 +139,84 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.DeploymentsClient.RequestInspector = withInspection(maxlen) azureClient.DeploymentsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DeploymentsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DeploymentsClient.UserAgent) + azureClient.DeploymentsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.DeploymentOperationsClient = resources.NewDeploymentOperationsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.DeploymentOperationsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.DeploymentOperationsClient.RequestInspector = withInspection(maxlen) azureClient.DeploymentOperationsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DeploymentOperationsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DeploymentOperationsClient.UserAgent) + azureClient.DeploymentOperationsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.DisksClient = compute.NewDisksClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.DisksClient.RequestInspector = withInspection(maxlen) azureClient.DisksClient.ResponseInspector = 
byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DisksClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DisksClient.UserAgent) + azureClient.DisksClient.Client.PollingDuration = SharedGalleryTimeout azureClient.GroupsClient = resources.NewGroupsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.GroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.GroupsClient.RequestInspector = withInspection(maxlen) azureClient.GroupsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.GroupsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.GroupsClient.UserAgent) + azureClient.GroupsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.ImagesClient = compute.NewImagesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.ImagesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.ImagesClient.RequestInspector = withInspection(maxlen) azureClient.ImagesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.ImagesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.ImagesClient.UserAgent) + azureClient.ImagesClient.Client.PollingDuration = SharedGalleryTimeout azureClient.InterfacesClient = network.NewInterfacesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.InterfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.InterfacesClient.RequestInspector = withInspection(maxlen) azureClient.InterfacesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.InterfacesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.InterfacesClient.UserAgent) + azureClient.InterfacesClient.Client.PollingDuration = SharedGalleryTimeout 
azureClient.SubnetsClient = network.NewSubnetsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.SubnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.SubnetsClient.RequestInspector = withInspection(maxlen) azureClient.SubnetsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.SubnetsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.SubnetsClient.UserAgent) + azureClient.SubnetsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.VirtualNetworksClient = network.NewVirtualNetworksClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.VirtualNetworksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.VirtualNetworksClient.RequestInspector = withInspection(maxlen) azureClient.VirtualNetworksClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VirtualNetworksClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VirtualNetworksClient.UserAgent) + azureClient.VirtualNetworksClient.Client.PollingDuration = SharedGalleryTimeout azureClient.PublicIPAddressesClient = network.NewPublicIPAddressesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.PublicIPAddressesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.PublicIPAddressesClient.RequestInspector = withInspection(maxlen) azureClient.PublicIPAddressesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.PublicIPAddressesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.PublicIPAddressesClient.UserAgent) + azureClient.PublicIPAddressesClient.Client.PollingDuration = SharedGalleryTimeout azureClient.VirtualMachinesClient = compute.NewVirtualMachinesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) 
azureClient.VirtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.VirtualMachinesClient.RequestInspector = withInspection(maxlen) azureClient.VirtualMachinesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), templateCapture(azureClient), errorCapture(azureClient)) azureClient.VirtualMachinesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VirtualMachinesClient.UserAgent) + azureClient.VirtualMachinesClient.Client.PollingDuration = SharedGalleryTimeout azureClient.SnapshotsClient = compute.NewSnapshotsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.SnapshotsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.SnapshotsClient.RequestInspector = withInspection(maxlen) azureClient.SnapshotsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.SnapshotsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.SnapshotsClient.UserAgent) + azureClient.SnapshotsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.AccountsClient = armStorage.NewAccountsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.AccountsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.AccountsClient.RequestInspector = withInspection(maxlen) azureClient.AccountsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.AccountsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.AccountsClient.UserAgent) + azureClient.AccountsClient.Client.PollingDuration = SharedGalleryTimeout azureClient.GalleryImageVersionsClient = newCompute.NewGalleryImageVersionsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.GalleryImageVersionsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) @@ -218,6 +230,7 @@ func 
NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.GalleryImagesClient.RequestInspector = withInspection(maxlen) azureClient.GalleryImagesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.GalleryImagesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.GalleryImagesClient.UserAgent) + azureClient.GalleryImageVersionsClient.Client.PollingDuration = SharedGalleryTimeout keyVaultURL, err := url.Parse(cloud.KeyVaultEndpoint) if err != nil { @@ -229,6 +242,7 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.VaultClient.RequestInspector = withInspection(maxlen) azureClient.VaultClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VaultClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VaultClient.UserAgent) + azureClient.VaultClient.Client.PollingDuration = SharedGalleryTimeout // TODO(boumenot) - SDK still does not have a full KeyVault client. // There are two ways that KeyVault has to be accessed, and each one has their own SPN. An authenticated SPN @@ -243,6 +257,7 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.VaultClientDelete.RequestInspector = withInspection(maxlen) azureClient.VaultClientDelete.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VaultClientDelete.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VaultClientDelete.UserAgent) + azureClient.VaultClientDelete.Client.PollingDuration = SharedGalleryTimeout // If this is a managed disk build, this should be ignored. 
if resourceGroupName != "" && storageAccountName != "" { From 936ae42b0050f280a801a635d0ac1c27fb2c73b0 Mon Sep 17 00:00:00 2001 From: larohra Date: Fri, 11 Oct 2019 15:29:02 -0700 Subject: [PATCH 02/55] Added a new parameter in config to override the default Azure Go SDK PollingDuration timeout --- builder/azure/arm/azure_client.go | 32 +++++++++++++++---------------- builder/azure/arm/builder.go | 1 + builder/azure/arm/config.go | 15 +++++++++++++++ 3 files changed, 32 insertions(+), 16 deletions(-) diff --git a/builder/azure/arm/azure_client.go b/builder/azure/arm/azure_client.go index 130eba2a0..5ca281c90 100644 --- a/builder/azure/arm/azure_client.go +++ b/builder/azure/arm/azure_client.go @@ -127,7 +127,7 @@ func byConcatDecorators(decorators ...autorest.RespondDecorator) autorest.Respon } func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string, - cloud *azure.Environment, SharedGalleryTimeout time.Duration, + cloud *azure.Environment, SharedGalleryTimeout time.Duration, PollingDuration time.Duration, servicePrincipalToken, servicePrincipalTokenVault *adal.ServicePrincipalToken) (*AzureClient, error) { var azureClient = &AzureClient{} @@ -139,84 +139,84 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.DeploymentsClient.RequestInspector = withInspection(maxlen) azureClient.DeploymentsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DeploymentsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DeploymentsClient.UserAgent) - azureClient.DeploymentsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.DeploymentsClient.Client.PollingDuration = PollingDuration azureClient.DeploymentOperationsClient = resources.NewDeploymentOperationsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.DeploymentOperationsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) 
azureClient.DeploymentOperationsClient.RequestInspector = withInspection(maxlen) azureClient.DeploymentOperationsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DeploymentOperationsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DeploymentOperationsClient.UserAgent) - azureClient.DeploymentOperationsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.DeploymentOperationsClient.Client.PollingDuration = PollingDuration azureClient.DisksClient = compute.NewDisksClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.DisksClient.RequestInspector = withInspection(maxlen) azureClient.DisksClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.DisksClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.DisksClient.UserAgent) - azureClient.DisksClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.DisksClient.Client.PollingDuration = PollingDuration azureClient.GroupsClient = resources.NewGroupsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.GroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.GroupsClient.RequestInspector = withInspection(maxlen) azureClient.GroupsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.GroupsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.GroupsClient.UserAgent) - azureClient.GroupsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.GroupsClient.Client.PollingDuration = PollingDuration azureClient.ImagesClient = compute.NewImagesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.ImagesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) 
azureClient.ImagesClient.RequestInspector = withInspection(maxlen) azureClient.ImagesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.ImagesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.ImagesClient.UserAgent) - azureClient.ImagesClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.ImagesClient.Client.PollingDuration = PollingDuration azureClient.InterfacesClient = network.NewInterfacesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.InterfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.InterfacesClient.RequestInspector = withInspection(maxlen) azureClient.InterfacesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.InterfacesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.InterfacesClient.UserAgent) - azureClient.InterfacesClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.InterfacesClient.Client.PollingDuration = PollingDuration azureClient.SubnetsClient = network.NewSubnetsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.SubnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.SubnetsClient.RequestInspector = withInspection(maxlen) azureClient.SubnetsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.SubnetsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.SubnetsClient.UserAgent) - azureClient.SubnetsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.SubnetsClient.Client.PollingDuration = PollingDuration azureClient.VirtualNetworksClient = network.NewVirtualNetworksClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.VirtualNetworksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) 
azureClient.VirtualNetworksClient.RequestInspector = withInspection(maxlen) azureClient.VirtualNetworksClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VirtualNetworksClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VirtualNetworksClient.UserAgent) - azureClient.VirtualNetworksClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.VirtualNetworksClient.Client.PollingDuration = PollingDuration azureClient.PublicIPAddressesClient = network.NewPublicIPAddressesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.PublicIPAddressesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.PublicIPAddressesClient.RequestInspector = withInspection(maxlen) azureClient.PublicIPAddressesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.PublicIPAddressesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.PublicIPAddressesClient.UserAgent) - azureClient.PublicIPAddressesClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.PublicIPAddressesClient.Client.PollingDuration = PollingDuration azureClient.VirtualMachinesClient = compute.NewVirtualMachinesClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.VirtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.VirtualMachinesClient.RequestInspector = withInspection(maxlen) azureClient.VirtualMachinesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), templateCapture(azureClient), errorCapture(azureClient)) azureClient.VirtualMachinesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VirtualMachinesClient.UserAgent) - azureClient.VirtualMachinesClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.VirtualMachinesClient.Client.PollingDuration = PollingDuration azureClient.SnapshotsClient = 
compute.NewSnapshotsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.SnapshotsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.SnapshotsClient.RequestInspector = withInspection(maxlen) azureClient.SnapshotsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.SnapshotsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.SnapshotsClient.UserAgent) - azureClient.SnapshotsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.SnapshotsClient.Client.PollingDuration = PollingDuration azureClient.AccountsClient = armStorage.NewAccountsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.AccountsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) azureClient.AccountsClient.RequestInspector = withInspection(maxlen) azureClient.AccountsClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.AccountsClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.AccountsClient.UserAgent) - azureClient.AccountsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.AccountsClient.Client.PollingDuration = PollingDuration azureClient.GalleryImageVersionsClient = newCompute.NewGalleryImageVersionsClientWithBaseURI(cloud.ResourceManagerEndpoint, subscriptionID) azureClient.GalleryImageVersionsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) @@ -230,7 +230,7 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.GalleryImagesClient.RequestInspector = withInspection(maxlen) azureClient.GalleryImagesClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.GalleryImagesClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.GalleryImagesClient.UserAgent) - 
azureClient.GalleryImageVersionsClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.GalleryImageVersionsClient.Client.PollingDuration = PollingDuration keyVaultURL, err := url.Parse(cloud.KeyVaultEndpoint) if err != nil { @@ -242,7 +242,7 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.VaultClient.RequestInspector = withInspection(maxlen) azureClient.VaultClient.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VaultClient.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VaultClient.UserAgent) - azureClient.VaultClient.Client.PollingDuration = SharedGalleryTimeout + azureClient.VaultClient.Client.PollingDuration = PollingDuration // TODO(boumenot) - SDK still does not have a full KeyVault client. // There are two ways that KeyVault has to be accessed, and each one has their own SPN. An authenticated SPN @@ -257,7 +257,7 @@ func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string azureClient.VaultClientDelete.RequestInspector = withInspection(maxlen) azureClient.VaultClientDelete.ResponseInspector = byConcatDecorators(byInspecting(maxlen), errorCapture(azureClient)) azureClient.VaultClientDelete.UserAgent = fmt.Sprintf("%s %s", useragent.String(), azureClient.VaultClientDelete.UserAgent) - azureClient.VaultClientDelete.Client.PollingDuration = SharedGalleryTimeout + azureClient.VaultClientDelete.Client.PollingDuration = PollingDuration // If this is a managed disk build, this should be ignored. 
if resourceGroupName != "" && storageAccountName != "" { diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 27bc32a4a..f569ed19b 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -81,6 +81,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack b.config.StorageAccount, b.config.ClientConfig.CloudEnvironment, b.config.SharedGalleryTimeout, + b.config.PollingDurationTimeout, spnCloud, spnKeyVault) diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go index 7243bd2a8..d830e3394 100644 --- a/builder/azure/arm/config.go +++ b/builder/azure/arm/config.go @@ -311,6 +311,14 @@ type Config struct { // 4. PlanPromotionCode // PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"` + // The default PollingDuration for azure is 15mins, this property will override + // that value. See [Azure DefaultPollingDuration](https://godoc.org/github.com/Azure/go-autorest/autorest#pkg-constants) + // If your Packer build is failing on the + // ARM deployment step with the error `Original Error: + // context deadline exceeded`, then you probably need to increase this timeout from + // its default of "15m" (valid time units include `s` for seconds, `m` for + // minutes, and `h` for hours.) + PollingDurationTimeout time.Duration `mapstructure:"polling_duration_timeout" required:"false"` // If either Linux or Windows is specified Packer will // automatically configure authentication credentials for the provisioned // machine. For Linux this configures an SSH authorized key. For Windows @@ -889,6 +897,13 @@ func assertRequiredParametersSet(c *Config, errs *packer.MultiError) { } } + ///////////////////////////////////////////// + // Polling Duration Timeout + if c.PollingDurationTimeout == 0 { + // In the sdk, the default is 15 m. 
+ c.PollingDurationTimeout = 15 * time.Minute + } + ///////////////////////////////////////////// // OS if strings.EqualFold(c.OSType, constants.Target_Linux) { From 9dc622af0568286c89cb963fd130dc0450870b5b Mon Sep 17 00:00:00 2001 From: larohra Date: Mon, 14 Oct 2019 19:31:44 +0000 Subject: [PATCH 03/55] Fixed circleCi test --- .../builder/azure/arm/_Config-not-required.html.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/source/partials/builder/azure/arm/_Config-not-required.html.md b/website/source/partials/builder/azure/arm/_Config-not-required.html.md index af8816b87..ebeed1d38 100644 --- a/website/source/partials/builder/azure/arm/_Config-not-required.html.md +++ b/website/source/partials/builder/azure/arm/_Config-not-required.html.md @@ -187,6 +187,14 @@ 3. PlanPublisher 4. PlanPromotionCode +- `polling_duration_timeout` (time.Duration) - The default PollingDuration for azure is 15mins, this property will override + that value. See [Azure DefaultPollingDuration](https://godoc.org/github.com/Azure/go-autorest/autorest#pkg-constants) + If your Packer build is failing on the + ARM deployment step with the error `Original Error: + context deadline exceeded`, then you probably need to increase this timeout from + its default of "15m" (valid time units include `s` for seconds, `m` for + minutes, and `h` for hours.) + - `os_type` (string) - If either Linux or Windows is specified Packer will automatically configure authentication credentials for the provisioned machine. For Linux this configures an SSH authorized key. 
For Windows From 8628c75de18f394cfeb40bc738a10262a17d3eaa Mon Sep 17 00:00:00 2001 From: Joakim Westin Date: Tue, 15 Oct 2019 10:34:48 +0200 Subject: [PATCH 04/55] Changed from jq to JMESPath queries Changed the examples to use AZ CLI built-in JMESPath query instead of needing `jq` tool installed (functionally they are the same) --- .../source/docs/builders/azure-setup.html.md | 22 ++++--------------- 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/website/source/docs/builders/azure-setup.html.md b/website/source/docs/builders/azure-setup.html.md index 6da8f184c..ee73b1715 100644 --- a/website/source/docs/builders/azure-setup.html.md +++ b/website/source/docs/builders/azure-setup.html.md @@ -90,17 +90,7 @@ To get the credentials above, we will need to install the Azure CLI. Please refer to Microsoft's official [installation guide](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/). --> The guides below also use a tool called -[`jq`](https://stedolan.github.io/jq/) to simplify the output from the Azure -CLI, though this is optional. If you use homebrew you can simply -`brew install node jq`. - -You can also use the Azure CLI in Docker. It also comes with `jq` -pre-installed: - -``` shell -$ docker run -it microsoft/azure-cli -``` +The guides below use [JMESPath](http://jmespath.org/) queries to select and reformat output from the AZ CLI commands. JMESPath is [part of the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/query-azure-cli?view=azure-cli-latest) and can be used in the same way as the `jq` tool. 
## Guided Setup @@ -148,15 +138,11 @@ below: Get your account information ``` shell -$ az account list --output json | jq -r '.[].name' +$ az account list --output table --query '[].{Name:name,subscription_id:id}' $ az account set --subscription ACCOUNTNAME -$ az account show --output json | jq -r '.id' +$ az account show --output json --query 'id' ``` --> Throughout this document when you see a command pipe to `jq` you may -instead omit `--output json` and everything after it, but the output will be -more verbose. For example you can simply run `az account list` instead. - This will print out one line that look like this: 4f562e88-8caf-421a-b4da-e3f6786c52ec @@ -235,7 +221,7 @@ granular permissions, though this is out of scope. You can see a list of pre-configured roles via: ``` shell -$ az role definition list --output json | jq ".[] | {name:.roleName, description:.description}" +$ az role definition list --output table --query '[].{name:roleName, description:description}' ``` If you would rather use a certificate to autenticate your service principal, From d6994df188c52b67e9ba833b8714642b6748f90d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vladim=C3=ADr?= Date: Tue, 15 Oct 2019 15:14:22 +0200 Subject: [PATCH 05/55] Select first adapter attached to host OS's switch If more than one vNic is attached to vmSwitch, use the first one only. 
--- common/powershell/hyperv/hyperv.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/powershell/hyperv/hyperv.go b/common/powershell/hyperv/hyperv.go index ecfb40ec2..b1dd6f47d 100644 --- a/common/powershell/hyperv/hyperv.go +++ b/common/powershell/hyperv/hyperv.go @@ -32,7 +32,7 @@ type scriptOptions struct { func GetHostAdapterIpAddressForSwitch(switchName string) (string, error) { var script = ` param([string]$switchName, [int]$addressIndex) -$HostVMAdapter = Hyper-V\Get-VMNetworkAdapter -ManagementOS -SwitchName $switchName +$HostVMAdapter = Hyper-V\Get-VMNetworkAdapter -ManagementOS -SwitchName $switchName | Select-Object -First 1 if ($HostVMAdapter){ $HostNetAdapter = Get-NetAdapter | Where-Object { $_.DeviceId -eq $HostVMAdapter.DeviceId } if ($HostNetAdapter){ From 72fdce09ba80487d015a76d99a1ee783bf40b895 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 25 Sep 2019 21:12:33 +0000 Subject: [PATCH 06/55] Allow GetServicePrincipalToken to be called independently --- builder/azure/common/client/config.go | 50 +++++++++++++++------------ 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/builder/azure/common/client/config.go b/builder/azure/common/client/config.go index 9a4a736e8..230188eb5 100644 --- a/builder/azure/common/client/config.go +++ b/builder/azure/common/client/config.go @@ -198,60 +198,64 @@ func (c Config) UseMSI() bool { c.TenantID == "" } -func (c Config) GetServicePrincipalTokens( - say func(string)) ( +func (c Config) GetServicePrincipalTokens(say func(string)) ( servicePrincipalToken *adal.ServicePrincipalToken, servicePrincipalTokenVault *adal.ServicePrincipalToken, err error) { - tenantID := c.TenantID + servicePrincipalToken, err = c.GetServicePrincipalToken(say, + c.CloudEnvironment.ResourceManagerEndpoint) + if err != nil { + return nil, nil, err + } + servicePrincipalTokenVault, err = c.GetServicePrincipalToken(say, + strings.TrimRight(c.CloudEnvironment.KeyVaultEndpoint, "/")) + if err != nil 
{ + return nil, nil, err + } + return servicePrincipalToken, servicePrincipalTokenVault, nil +} + +func (c Config) GetServicePrincipalToken( + say func(string), forResource string) ( + servicePrincipalToken *adal.ServicePrincipalToken, + err error) { var auth oAuthTokenProvider switch c.authType { case authTypeDeviceLogin: say("Getting tokens using device flow") - auth = NewDeviceFlowOAuthTokenProvider(*c.CloudEnvironment, say, tenantID) + auth = NewDeviceFlowOAuthTokenProvider(*c.CloudEnvironment, say, c.TenantID) case authTypeMSI: say("Getting tokens using Managed Identity for Azure") auth = NewMSIOAuthTokenProvider(*c.CloudEnvironment) case authTypeClientSecret: say("Getting tokens using client secret") - auth = NewSecretOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientSecret, tenantID) + auth = NewSecretOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientSecret, c.TenantID) case authTypeClientCert: say("Getting tokens using client certificate") - auth, err = NewCertOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientCertPath, tenantID) + auth, err = NewCertOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientCertPath, c.TenantID) if err != nil { - return nil, nil, err + return nil, err } case authTypeClientBearerJWT: say("Getting tokens using client bearer JWT") - auth = NewJWTOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientJWT, tenantID) + auth = NewJWTOAuthTokenProvider(*c.CloudEnvironment, c.ClientID, c.ClientJWT, c.TenantID) default: panic("authType not set, call FillParameters, or set explicitly") } - servicePrincipalToken, err = auth.getServicePrincipalToken() + servicePrincipalToken, err = auth.getServicePrincipalTokenWithResource(forResource) if err != nil { - return nil, nil, err + return nil, err } err = servicePrincipalToken.EnsureFresh() if err != nil { - return nil, nil, err + return nil, err } - servicePrincipalTokenVault, err = auth.getServicePrincipalTokenWithResource( - 
strings.TrimRight(c.CloudEnvironment.KeyVaultEndpoint, "/")) - if err != nil { - return nil, nil, err - } - - err = servicePrincipalTokenVault.EnsureFresh() - if err != nil { - return nil, nil, err - } - - return servicePrincipalToken, servicePrincipalTokenVault, nil + return servicePrincipalToken, nil } // FillParameters capture the user intent from the supplied parameter set in authType, retrieves the TenantID and CloudEnvironment if not specified. From 2ded8f25ecf7c435ea12006d69303084f3adbe5c Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 25 Sep 2019 21:13:09 +0000 Subject: [PATCH 07/55] Add metadata client --- builder/azure/common/client/metadata.go | 81 ++++++++++++++++++++ builder/azure/common/client/metadata_test.go | 33 ++++++++ 2 files changed, 114 insertions(+) create mode 100644 builder/azure/common/client/metadata.go create mode 100644 builder/azure/common/client/metadata_test.go diff --git a/builder/azure/common/client/metadata.go b/builder/azure/common/client/metadata.go new file mode 100644 index 000000000..ad76cb007 --- /dev/null +++ b/builder/azure/common/client/metadata.go @@ -0,0 +1,81 @@ +package client + +import ( + "fmt" + "net/http" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// DefaultMetadataClient is the default instance metadata client for Azure. 
Replace this variable for testing purposes only +var DefaultMetadataClient = NewMetadataClient() + +// MetadataClient holds methods that Packer uses to get information about the current VM +type MetadataClientAPI interface { + GetComputeInfo() (*ComputeInfo, error) +} + +type ComputeInfo struct{ + Name string + ResourceGroupName string + SubscriptionID string +Location string +} + +// metadataClient implements MetadataClient +type metadataClient struct { + autorest.Sender +} + +var _ MetadataClientAPI = metadataClient{} + +const imdsURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" + +// VMResourceID returns the resource ID of the current VM +func (client metadataClient) GetComputeInfo() (*ComputeInfo, error) { + req, err := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithHeader("Metadata", "true"), + autorest.WithBaseURL(imdsURL), + ).Prepare((&http.Request{})) + if err != nil { + return nil, err + } + + res, err := autorest.SendWithSender(client, req, + autorest.DoRetryForDuration(1*time.Minute, 5*time.Second)) + if err != nil { + return nil, err + } + + var vminfo struct { + ComputeInfo `json:"compute"` + } + + err = autorest.Respond( + res, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&vminfo), + autorest.ByClosing()) + if err != nil { + return nil, err + } + return &vminfo.ComputeInfo, nil +} + +func(ci ComputeInfo) ResourceID() string{ + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", + ci.SubscriptionID, + ci.ResourceGroupName, + ci.Name, + ) +} + +// NewMetadataClient creates a new instance metadata client +func NewMetadataClient() MetadataClientAPI { + return metadataClient{ + Sender: autorest.CreateSender(), + } +} diff --git a/builder/azure/common/client/metadata_test.go b/builder/azure/common/client/metadata_test.go new file mode 100644 index 000000000..dddc9a2a0 --- /dev/null +++ b/builder/azure/common/client/metadata_test.go 
@@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + "testing" + + "github.com/Azure/go-autorest/autorest/azure" + + "github.com/hashicorp/packer/builder/azure/common" + "github.com/stretchr/testify/assert" +) + +func Test_MetadataReturnsComputeInfo(t *testing.T) { + if !common.IsAzure() { + t.Skipf("Not running on Azure, skipping live IMDS test") + } + mdc := NewMetadataClient() + info, err := mdc.GetComputeInfo() + assert.Nil(t, err) + + vm, err := azure.ParseResourceID(fmt.Sprintf( + "/subscriptions/%s"+ + "/resourceGroups/%s"+ + "/providers/Microsoft.Compute"+ + "/virtualMachines/%s", + info.SubscriptionID, + info.ResourceGroupName, + info.Name)) + assert.Nil(t, err, "%q is not parsable as an Azure resource info", info) + + assert.Regexp(t, "^[0-9a-fA-F-]{36}$", vm.SubscriptionID) + t.Logf("VM: %+v", vm) +} From f106adbd12cb7d45aa76194c3011a2874a4179b3 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 25 Sep 2019 21:13:23 +0000 Subject: [PATCH 08/55] Add Azure client set for chroot builder --- .../azure/common/client/azure_client_set.go | 90 +++++++++++++++++++ builder/azure/common/client/platform_image.go | 57 ++++++++++++ .../common/client/platform_image_test.go | 30 +++++++ builder/azure/common/client/testclient.go | 31 +++++++ 4 files changed, 208 insertions(+) create mode 100644 builder/azure/common/client/azure_client_set.go create mode 100644 builder/azure/common/client/platform_image.go create mode 100644 builder/azure/common/client/platform_image_test.go create mode 100644 builder/azure/common/client/testclient.go diff --git a/builder/azure/common/client/azure_client_set.go b/builder/azure/common/client/azure_client_set.go new file mode 100644 index 000000000..c0dfb12e8 --- /dev/null +++ b/builder/azure/common/client/azure_client_set.go @@ -0,0 +1,90 @@ +package client + +import ( + "net/http" + "regexp" + "time" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + 
"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi" + "github.com/Azure/go-autorest/autorest" +) + +type AzureClientSet interface { + MetadataClient() MetadataClientAPI + + DisksClient() computeapi.DisksClientAPI + ImagesClient() computeapi.ImagesClientAPI + VirtualMachinesClient() computeapi.VirtualMachinesClientAPI + VirtualMachineImagesClient() VirtualMachineImagesClientAPI + + PollClient() autorest.Client +} + +var subscriptionPathRegex = regexp.MustCompile(`/subscriptions/([[:xdigit:]]{8}(-[[:xdigit:]]{4}){3}-[[:xdigit:]]{12})`) + +var _ AzureClientSet = &azureClientSet{} + +type azureClientSet struct { + sender autorest.Sender + authorizer autorest.Authorizer + subscriptionID string + PollingDelay time.Duration +} + +func New(c Config, say func(string)) (AzureClientSet, error) { + token, err := c.GetServicePrincipalToken(say, c.CloudEnvironment.ResourceManagerEndpoint) + if err != nil { + return nil, err + } + return &azureClientSet{ + authorizer: autorest.NewBearerAuthorizer(token), + subscriptionID: c.SubscriptionID, + sender: http.DefaultClient, + PollingDelay: time.Second, + }, nil +} + +func (s azureClientSet) configureAutorestClient(c *autorest.Client) { + c.Authorizer = s.authorizer + c.Sender = s.sender +} + +func (s azureClientSet) MetadataClient() MetadataClientAPI { + return metadataClient{s.sender} +} + +func (s azureClientSet) DisksClient() computeapi.DisksClientAPI { + c := compute.NewDisksClient(s.subscriptionID) + s.configureAutorestClient(&c.Client) + c.PollingDelay = s.PollingDelay + return c +} + +func (s azureClientSet) ImagesClient() computeapi.ImagesClientAPI { + c := compute.NewImagesClient(s.subscriptionID) + s.configureAutorestClient(&c.Client) + c.PollingDelay = s.PollingDelay + return c +} + +func (s azureClientSet) VirtualMachinesClient() computeapi.VirtualMachinesClientAPI { + c := compute.NewVirtualMachinesClient(s.subscriptionID) + s.configureAutorestClient(&c.Client) + c.PollingDelay = 
s.PollingDelay + return c +} + +func (s azureClientSet) VirtualMachineImagesClient() VirtualMachineImagesClientAPI { + c := compute.NewVirtualMachineImagesClient(s.subscriptionID) + s.configureAutorestClient(&c.Client) + c.PollingDelay = s.PollingDelay + return virtualMachineImagesClientAPI{c} +} + +func (s azureClientSet) PollClient() autorest.Client { + c := autorest.NewClientWithUserAgent("Packer-Azure-ClientSet") + s.configureAutorestClient(&c) + c.PollingDelay = time.Second / 3 + return c +} diff --git a/builder/azure/common/client/platform_image.go b/builder/azure/common/client/platform_image.go new file mode 100644 index 000000000..ac11ec935 --- /dev/null +++ b/builder/azure/common/client/platform_image.go @@ -0,0 +1,57 @@ +package client + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi" + "github.com/Azure/go-autorest/autorest/to" + "regexp" + "strings" +) + +var platformImageRegex = regexp.MustCompile(`^[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+:[-_.a-zA-Z0-9]+$`) + +type VirtualMachineImagesClientAPI interface { + computeapi.VirtualMachineImagesClientAPI + // extensions + GetLatest(ctx context.Context, publisher, offer, sku, location string) (*compute.VirtualMachineImageResource, error) +} + +var _ VirtualMachineImagesClientAPI = virtualMachineImagesClientAPI{} + +type virtualMachineImagesClientAPI struct { + computeapi.VirtualMachineImagesClientAPI +} + +func ParsePlatformImageURN(urn string) (image *PlatformImage, err error) { + if !platformImageRegex.Match([]byte(urn)) { + return nil, fmt.Errorf("%q is not a valid platform image specifier", urn) + } + parts := strings.Split(urn, ":") + return &PlatformImage{parts[0], parts[1], parts[2], parts[3]}, nil +} + +func (c virtualMachineImagesClientAPI) GetLatest(ctx context.Context, publisher, offer, sku, location string) (*compute.VirtualMachineImageResource, 
error) { + result, err := c.List(ctx, location, publisher, offer, sku, "", to.Int32Ptr(1), "name desc") + if err != nil { + return nil, err + } + if result.Value == nil || len(*result.Value) == 0 { + return nil, fmt.Errorf("%s:%s:%s:latest could not be found in location %s", publisher, offer, sku, location) + } + + return &(*result.Value)[0], nil +} + +type PlatformImage struct { + Publisher, Offer, Sku, Version string +} + +func (pi PlatformImage) URN() string { + return fmt.Sprintf("%s:%s:%s:%s", + pi.Publisher, + pi.Offer, + pi.Sku, + pi.Version) +} diff --git a/builder/azure/common/client/platform_image_test.go b/builder/azure/common/client/platform_image_test.go new file mode 100644 index 000000000..981031d45 --- /dev/null +++ b/builder/azure/common/client/platform_image_test.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + "testing" +) + +func Test_platformImageRegex(t *testing.T) { + for i, v := range []string{ + "Publisher:Offer:Sku:Versions", + "Publisher:Offer-name:2.0_alpha:2.0.2019060122", + } { + t.Run(fmt.Sprintf("should_match_%d", i), func(t *testing.T) { + if !platformImageRegex.Match([]byte(v)) { + t.Fatalf("expected %q to match", v) + } + }) + } + + for i, v := range []string{ + "Publ isher:Offer:Sku:Versions", + "Publ/isher:Offer-name:2.0_alpha:2.0.2019060122", + } { + t.Run(fmt.Sprintf("should_not_match_%d", i), func(t *testing.T) { + if platformImageRegex.Match([]byte(v)) { + t.Fatalf("did not expected %q to match", v) + } + }) + } +} diff --git a/builder/azure/common/client/testclient.go b/builder/azure/common/client/testclient.go new file mode 100644 index 000000000..964e04795 --- /dev/null +++ b/builder/azure/common/client/testclient.go @@ -0,0 +1,31 @@ +package client + +import ( + "os" + "testing" + "net/http" + "errors" + + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +func GetTestClientSet(t *testing.T) (AzureClientSet, error) { + if (os.Getenv("AZURE_INTEGRATION_TEST") == "") { + t.Skip("AZURE_INTEGRATION_TEST not 
set") + } else { + a, err := auth.NewAuthorizerFromEnvironment() + if err == nil { + cli := azureClientSet{} + cli.authorizer = a + cli.subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID") + cli.PollingDelay = 0 + cli.sender = http.DefaultClient + return cli, nil + } else { + t.Skipf("Could not create Azure client: %v", err) + } + } + + return nil, errors.New("Couldn't create client set") +} + From 113dc1234617d748152fe039c24511de3cf3e2fc Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 25 Sep 2019 21:15:25 +0000 Subject: [PATCH 09/55] Add function to detect whether Packer is running on Azure --- builder/azure/common/detect_azure.go | 8 +++++ builder/azure/common/detect_azure_linux.go | 23 +++++++++++++++ .../azure/common/detect_azure_linux_test.go | 29 +++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 builder/azure/common/detect_azure.go create mode 100644 builder/azure/common/detect_azure_linux.go create mode 100644 builder/azure/common/detect_azure_linux_test.go diff --git a/builder/azure/common/detect_azure.go b/builder/azure/common/detect_azure.go new file mode 100644 index 000000000..001ca1b4f --- /dev/null +++ b/builder/azure/common/detect_azure.go @@ -0,0 +1,8 @@ +// +build !linux + +package common + +// IsAzure returns true if Packer is running on Azure (currently only works on Linux) +func IsAzure() bool { + return false +} diff --git a/builder/azure/common/detect_azure_linux.go b/builder/azure/common/detect_azure_linux.go new file mode 100644 index 000000000..f57eb42b9 --- /dev/null +++ b/builder/azure/common/detect_azure_linux.go @@ -0,0 +1,23 @@ +package common + +import ( + "bytes" + "io/ioutil" +) + +var ( + smbiosAssetTagFile = "/sys/class/dmi/id/chassis_asset_tag" + azureAssetTag = []byte("7783-7084-3265-9085-8269-3286-77\n") +) + +// IsAzure returns true if Packer is running on Azure +func IsAzure() bool { + return isAzureAssetTag(smbiosAssetTagFile) +} + +func isAzureAssetTag(filename string) bool { + if d, err := 
ioutil.ReadFile(filename); err == nil { + return bytes.Compare(d, azureAssetTag) == 0 + } + return false +} diff --git a/builder/azure/common/detect_azure_linux_test.go b/builder/azure/common/detect_azure_linux_test.go new file mode 100644 index 000000000..9d755cfd9 --- /dev/null +++ b/builder/azure/common/detect_azure_linux_test.go @@ -0,0 +1,29 @@ +package common + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsAzure(t *testing.T) { + f, err := ioutil.TempFile("", "TestIsAzure*") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + f.Seek(0, 0) + f.Truncate(0) + f.Write([]byte("not the azure assettag")) + + assert.False(t, isAzureAssetTag(f.Name()), "asset tag is not Azure's") + + f.Seek(0, 0) + f.Truncate(0) + f.Write(azureAssetTag) + + assert.True(t, isAzureAssetTag(f.Name()), "asset tag is Azure's") +} From 3c33aa4fc52c80ace98b3979d60f220e366dcfc2 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 19 Apr 2019 22:39:24 +0000 Subject: [PATCH 10/55] Add metadata client --- builder/azure/chroot/metadata.go | 71 +++++++++++++++++++++++++++ builder/azure/chroot/metadata_test.go | 24 +++++++++ 2 files changed, 95 insertions(+) create mode 100644 builder/azure/chroot/metadata.go create mode 100644 builder/azure/chroot/metadata_test.go diff --git a/builder/azure/chroot/metadata.go b/builder/azure/chroot/metadata.go new file mode 100644 index 000000000..6dea16017 --- /dev/null +++ b/builder/azure/chroot/metadata.go @@ -0,0 +1,71 @@ +package chroot + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/hashicorp/go-retryablehttp" +) + +// DefaultMetadataClient is the default instance metadata client for Azure. 
Replace this variable for testing purposes only +var DefaultMetadataClient = NewMetadataClient() + +// MetadataClient holds methods that Packer uses to get information about the current VM +type MetadataClient interface { + VMResourceID() (string, error) +} + +// metadataClient implements MetadataClient +type metadataClient struct{} + +const imdsURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" + +// VMResourceID returns the resource ID of the current VM +func (metadataClient) VMResourceID() (string, error) { + wc := retryablehttp.NewClient() + wc.RetryMax = 5 + + req, err := retryablehttp.NewRequest(http.MethodGet, imdsURL, nil) + if err != nil { + return "", err + } + req.Header.Add("Metadata", "true") + + res, err := wc.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + + d, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + + var vminfo struct { + Compute struct { + Name string + ResourceGroupName string + SubscriptionID string + } + } + + err = json.Unmarshal(d, &vminfo) + if err != nil { + return "", err + } + + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", + vminfo.Compute.Name, + vminfo.Compute.ResourceGroupName, + vminfo.Compute.SubscriptionID, + ), nil + +} + +// NewMetadataClient creates a new instance metadata client +func NewMetadataClient() MetadataClient { + return metadataClient{} +} diff --git a/builder/azure/chroot/metadata_test.go b/builder/azure/chroot/metadata_test.go new file mode 100644 index 000000000..a707ea3ae --- /dev/null +++ b/builder/azure/chroot/metadata_test.go @@ -0,0 +1,24 @@ +package chroot + +import ( + "testing" + + "github.com/Azure/go-autorest/autorest/azure" + + "github.com/hashicorp/packer/builder/azure/common" + "github.com/stretchr/testify/assert" +) + +func Test_MetadataReturnsVMResourceID(t *testing.T) { + if !common.IsAzure() { + t.Skipf("Not running on Azure, skipping live IMDS test") + } + mdc 
:= NewMetadataClient() + id, err := mdc.VMResourceID() + assert.Nil(t, err) + assert.NotEqual(t, id, "", "Expected VMResourceID to return non-empty string because we are running on Azure") + + vm, err := azure.ParseResourceID(id) + assert.Nil(t, err, "%q is not parsable as an Azure resource id", id) + t.Logf("VM: %+v", vm) +} From 369ec9a84cac22f980531b2cd7b08f65c805a7c0 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 25 Apr 2019 23:12:18 +0000 Subject: [PATCH 11/55] Create disk attacher component --- builder/azure/chroot/diskattacher.go | 182 ++++++++++++++++++++++ builder/azure/chroot/diskattacher_test.go | 86 ++++++++++ builder/azure/chroot/metadata.go | 71 --------- builder/azure/chroot/metadata_test.go | 24 --- 4 files changed, 268 insertions(+), 95 deletions(-) create mode 100644 builder/azure/chroot/diskattacher.go create mode 100644 builder/azure/chroot/diskattacher_test.go delete mode 100644 builder/azure/chroot/metadata.go delete mode 100644 builder/azure/chroot/metadata_test.go diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go new file mode 100644 index 000000000..2f21ebf45 --- /dev/null +++ b/builder/azure/chroot/diskattacher.go @@ -0,0 +1,182 @@ +package chroot + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/packer/builder/azure/common/client" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" +) + +type VirtualMachinesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) ( + result compute.VirtualMachinesCreateOrUpdateFuture, err error) + Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) ( + result compute.VirtualMachine, err error) +} + +type DiskAttacher interface { + AttachDisk(ctx 
context.Context, disk string) (lun int32, err error) + DetachDisk(ctx context.Context, disk string) (err error) + WaitForDevice(ctx context.Context, i int32) (device string, err error) +} + +func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher { + return diskAttacher{azureClient} +} + +type diskAttacher struct { + azcli client.AzureClientSet +} + +func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device string, err error) { + path := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", i) + + for { + l, err := os.Readlink(path) + if err == nil { + return filepath.Abs("/dev/disk/azure/scsi1/" + l) + } + if err != nil && err != os.ErrNotExist { + return "", err + } + select { + case <-time.After(100 * time.Millisecond): + // continue + case <-ctx.Done(): + return "", ctx.Err() + } + } +} + +func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error { + currentDisks, err := da.getDisks(ctx) + if err != nil { + return err + } + + // copy all disks to new array that not match diskID + newDisks := []compute.DataDisk{} + for _, disk := range currentDisks { + if disk.ManagedDisk != nil && + !strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) { + newDisks = append(newDisks, disk) + } + } + if len(currentDisks) == len(newDisks) { + return DiskNotFoundError + } + + return da.setDisks(ctx, newDisks) +} + +var DiskNotFoundError = errors.New("Disk not found") + +func (da diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) { + dataDisks, err := da.getDisks(ctx) + if err != nil { + return -1, err + } + + // check to see if disk is already attached, remember lun if found + var lun int32 = -1 + for _, disk := range dataDisks { + if disk.ManagedDisk != nil && + strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) { + // disk is already attached, just take this lun + if disk.Lun != nil { + lun = to.Int32(disk.Lun) + break + } + } + } + + if lun == -1 { + // disk was not found on VM, go and actually attach it 
+ + findFreeLun: + for lun = 0; lun < 64; lun++ { + for _, v := range dataDisks { + if to.Int32(v.Lun) == lun { + continue findFreeLun + } + } + // no datadisk is using this lun + break + } + + // append new data disk to collection + dataDisks = append(dataDisks, compute.DataDisk{ + CreateOption: compute.DiskCreateOptionTypesAttach, + ManagedDisk: &compute.ManagedDiskParameters{ + ID: to.StringPtr(diskID), + }, + Lun: to.Int32Ptr(lun), + }) + + // prepare resource object for update operation + err = da.setDisks(ctx, dataDisks) + if err != nil { + return -1, err + } + } + return lun, nil +} + +func (da diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) { + // getting resource info for this VM + vm, err := da.azcli.MetadataClient().GetComputeInfo() + if err != nil { + return compute.VirtualMachine{}, err + } + + // retrieve actual VM + vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, vm.ResourceGroupName, vm.Name, "") + if err != nil { + return compute.VirtualMachine{}, err + } + if vmResource.StorageProfile == nil { + return compute.VirtualMachine{}, errors.New("properties.storageProfile is not set on VM, this is unexpected") + } + + return vmResource, nil +} + +func (da diskAttacher) getDisks(ctx context.Context) ([]compute.DataDisk, error) { + vmResource, err := da.getThisVM(ctx) + if err != nil { + return []compute.DataDisk{}, err + } + + return *vmResource.StorageProfile.DataDisks, nil +} + +func (da diskAttacher) setDisks(ctx context.Context, disks []compute.DataDisk) error { + vmResource, err := da.getThisVM(ctx) + if err != nil { + return err + } + + id, err := azure.ParseResourceID(to.String(vmResource.ID)) + if err != nil { + return err + } + + vmResource.StorageProfile.DataDisks = &disks + vmResource.Resources = nil + + // update the VM resource, attaching disk + f, err := da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource) + if err == nil { + err = 
f.WaitForCompletionRef(ctx, da.azcli.PollClient()) + } + return err +} diff --git a/builder/azure/chroot/diskattacher_test.go b/builder/azure/chroot/diskattacher_test.go new file mode 100644 index 000000000..fbdc359ed --- /dev/null +++ b/builder/azure/chroot/diskattacher_test.go @@ -0,0 +1,86 @@ +package chroot + +import ( + "context" + "github.com/Azure/go-autorest/autorest/to" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" + "github.com/hashicorp/packer/builder/azure/chroot/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testvm = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup/Microsoft.Compute/virtualMachines/testVM" + testdisk = "/subscriptions/00000000-0000-0000-0000-000000000001/resourceGroups/testGroup2/Microsoft.Compute/disks/testDisk" +) + +// Tests assume current machine is capable of running chroot builder (i.e. an Azure VM) + +func Test_DiskAttacherAttachesDiskToVM(t *testing.T) { + azcli, err := client.GetTestClientSet(t) + require.Nil(t, err) + da := NewDiskAttacher(azcli) + testDiskName := t.Name() + + vm, err := azcli.MetadataClient().GetComputeInfo() + require.Nil(t, err, "Test needs to run on an Azure VM, unable to retrieve VM information") + t.Log("Creating new disk '", testDiskName, "' in ", vm.ResourceGroupName) + + disk, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName) + if err == nil { + t.Log("Disk already exists") + if disk.DiskState == compute.Attached { + t.Log("Disk is attached, assuming to this machine, trying to detach") + err = da.DetachDisk(context.TODO(), to.String(disk.ID)) + require.Nil(t, err) + } + t.Log("Deleting disk") + result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName) + require.Nil(t, err) + err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient()) + require.Nil(t, err) + } + + t.Log("Creating disk") + r, 
err := azcli.DisksClient().CreateOrUpdate(context.TODO(), vm.ResourceGroupName, testDiskName, compute.Disk{ + Location: to.StringPtr(vm.Location), + Sku: &compute.DiskSku{ + Name: compute.StandardLRS, + }, + DiskProperties: &compute.DiskProperties{ + DiskSizeGB: to.Int32Ptr(30), + CreationData: &compute.CreationData{CreateOption: compute.Empty}, + }, + }) + require.Nil(t, err) + err = r.WaitForCompletionRef(context.TODO(), azcli.PollClient()) + require.Nil(t, err) + + t.Log("Retrieving disk properties") + d, err := azcli.DisksClient().Get(context.TODO(), vm.ResourceGroupName, testDiskName) + require.Nil(t, err) + assert.NotNil(t, d) + + t.Log("Attaching disk") + lun, err := da.AttachDisk(context.TODO(), to.String(d.ID)) + assert.Nil(t, err) + + t.Log("Waiting for device") + dev, err := da.WaitForDevice(context.TODO(), lun) + assert.Nil(t, err) + + t.Log("Device path:", dev) + + t.Log("Detaching disk") + err = da.DetachDisk(context.TODO(), to.String(d.ID)) + require.Nil(t, err) + + t.Log("Deleting disk") + result, err := azcli.DisksClient().Delete(context.TODO(), vm.ResourceGroupName, testDiskName) + if err == nil { + err = result.WaitForCompletionRef(context.TODO(), azcli.PollClient()) + } + require.Nil(t, err) +} diff --git a/builder/azure/chroot/metadata.go b/builder/azure/chroot/metadata.go deleted file mode 100644 index 6dea16017..000000000 --- a/builder/azure/chroot/metadata.go +++ /dev/null @@ -1,71 +0,0 @@ -package chroot - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - "github.com/hashicorp/go-retryablehttp" -) - -// DefaultMetadataClient is the default instance metadata client for Azure. 
Replace this variable for testing purposes only -var DefaultMetadataClient = NewMetadataClient() - -// MetadataClient holds methods that Packer uses to get information about the current VM -type MetadataClient interface { - VMResourceID() (string, error) -} - -// metadataClient implements MetadataClient -type metadataClient struct{} - -const imdsURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" - -// VMResourceID returns the resource ID of the current VM -func (metadataClient) VMResourceID() (string, error) { - wc := retryablehttp.NewClient() - wc.RetryMax = 5 - - req, err := retryablehttp.NewRequest(http.MethodGet, imdsURL, nil) - if err != nil { - return "", err - } - req.Header.Add("Metadata", "true") - - res, err := wc.Do(req) - if err != nil { - return "", err - } - defer res.Body.Close() - - d, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", err - } - - var vminfo struct { - Compute struct { - Name string - ResourceGroupName string - SubscriptionID string - } - } - - err = json.Unmarshal(d, &vminfo) - if err != nil { - return "", err - } - - return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", - vminfo.Compute.Name, - vminfo.Compute.ResourceGroupName, - vminfo.Compute.SubscriptionID, - ), nil - -} - -// NewMetadataClient creates a new instance metadata client -func NewMetadataClient() MetadataClient { - return metadataClient{} -} diff --git a/builder/azure/chroot/metadata_test.go b/builder/azure/chroot/metadata_test.go deleted file mode 100644 index a707ea3ae..000000000 --- a/builder/azure/chroot/metadata_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package chroot - -import ( - "testing" - - "github.com/Azure/go-autorest/autorest/azure" - - "github.com/hashicorp/packer/builder/azure/common" - "github.com/stretchr/testify/assert" -) - -func Test_MetadataReturnsVMResourceID(t *testing.T) { - if !common.IsAzure() { - t.Skipf("Not running on Azure, skipping live IMDS test") - } - 
mdc := NewMetadataClient() - id, err := mdc.VMResourceID() - assert.Nil(t, err) - assert.NotEqual(t, id, "", "Expected VMResourceID to return non-empty string because we are running on Azure") - - vm, err := azure.ParseResourceID(id) - assert.Nil(t, err, "%q is not parsable as an Azure resource id", id) - t.Logf("VM: %+v", vm) -} From 3d329cf87d7b87842ebcca00a72513823ce36921 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 17 May 2019 00:24:53 +0000 Subject: [PATCH 12/55] Add chroot builder framework --- builder/azure/chroot/builder.go | 75 +++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 builder/azure/chroot/builder.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go new file mode 100644 index 000000000..073c13aac --- /dev/null +++ b/builder/azure/chroot/builder.go @@ -0,0 +1,75 @@ +package chroot + +import ( + "context" + "errors" + "runtime" + + azcommon "github.com/hashicorp/packer/builder/azure/common" + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + ctx interpolate.Context +} + +type Builder struct { + config Config + runner multistep.Runner +} + +func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { + b.config.ctx.Funcs = azcommon.TemplateFuncs + err := config.Decode(&b.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &b.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + // fields to exclude from interpolation + }, + }, + }, raws...) 
+ + // checks, accumulate any errors or warnings + var errs *packer.MultiError + var warns []string + + if err != nil { + return nil, err + } + return warns, errs +} + +func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { + if runtime.GOOS != "linux" { + return nil, errors.New("The azure-chroot builder only works on Linux environments.") + } + + // Setup the state bag and initial state for the steps + state := new(multistep.BasicStateBag) + state.Put("config", &b.config) + state.Put("hook", hook) + state.Put("ui", ui) + + // Build the steps + var steps []multistep.Step + + // Run! + b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) + b.runner.Run(ctx, state) + + // If there was an error, return that + if rawErr, ok := state.GetOk("error"); ok { + return nil, rawErr.(error) + } + + return nil, nil +} + +var _ packer.Builder = &Builder{} From 45d3f28c67c5aeff32316e1652beeaa3ab548238 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 27 May 2019 06:20:11 +0000 Subject: [PATCH 13/55] Add StepCreateNewDisk --- builder/azure/chroot/builder.go | 55 ++++++++++++ builder/azure/chroot/step_create_new_disk.go | 88 ++++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 builder/azure/chroot/step_create_new_disk.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 073c13aac..add0a04c6 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -3,9 +3,13 @@ package chroot import ( "context" "errors" + "fmt" + "log" "runtime" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" azcommon "github.com/hashicorp/packer/builder/azure/common" + "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/common" "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/helper/multistep" @@ -16,6 +20,11 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` + 
FromScratch bool `mapstructure:"from_scratch"` + + OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` + OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` + ctx interpolate.Context } @@ -36,10 +45,22 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { }, }, raws...) + // defaults + if b.config.OSDiskStorageAccountType == "" { + b.config.OSDiskStorageAccountType = string(compute.PremiumLRS) + } + // checks, accumulate any errors or warnings var errs *packer.MultiError var warns []string + if b.config.FromScratch { + if b.config.OSDiskSizeGB == 0 { + errs = packer.MultiErrorAppend( + errs, errors.New("osdisk_size_gb is required with from_scratch.")) + } + } + if err != nil { return nil, err } @@ -51,15 +72,49 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack return nil, errors.New("The azure-chroot builder only works on Linux environments.") } + var azcli client.AzureClientSet + // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) state.Put("config", &b.config) state.Put("hook", hook) state.Put("ui", ui) + info, err := azcli.MetadataClient().GetComputeInfo() + if err != nil { + log.Printf("MetadataClient().GetComputeInfo(): error: %+v", err) + err := fmt.Errorf( + "Error retrieving information ARM resource ID and location" + + "of the VM that Packer is running on.\n" + + "Please verify that Packer is running on a proper Azure VM.") + ui.Error(err.Error()) + return nil, err + } + + osDiskName := "PackerBuiltOsDisk" + + state.Put("instance", info) + if err != nil { + return nil, err + } + // Build the steps var steps []multistep.Step + if !b.config.FromScratch { + panic("Only from_scratch is currently implemented") + // create disk from PIR / managed image (warn on non-linux images) + } else { + steps = append(steps, + &StepCreateNewDisk{ + SubscriptionID: info.SubscriptionID, + ResourceGroup: info.ResourceGroupName, + DiskName: osDiskName, + DiskSizeGB: 
b.config.OSDiskSizeGB, + DiskStorageAccountType: b.config.OSDiskStorageAccountType, + }) + } + // Run! b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) b.runner.Run(ctx, state) diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go new file mode 100644 index 000000000..098adb25f --- /dev/null +++ b/builder/azure/chroot/step_create_new_disk.go @@ -0,0 +1,88 @@ +package chroot + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" + "github.com/Azure/go-autorest/autorest/to" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +var _ multistep.Step = &StepCreateNewDisk{} + +type StepCreateNewDisk struct { + SubscriptionID, ResourceGroup, DiskName string + DiskSizeGB int32 // optional, ignored if 0 + DiskStorageAccountType string // from compute.DiskStorageAccountTypes +} + +func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + s.SubscriptionID, + s.ResourceGroup, + s.DiskName) + state.Put("os_disk_resource_id", diskResourceID) + ui.Say(fmt.Sprintf("Creating disk '%s'", diskResourceID)) + + disk := compute.Disk{ + Sku: &compute.DiskSku{ + Name: "", + }, + //Zones: nil, + DiskProperties: &compute.DiskProperties{ + OsType: "", + HyperVGeneration: "", + CreationData: &compute.CreationData{ + CreateOption: compute.Empty, + }, + DiskSizeGB: to.Int32Ptr(s.DiskSizeGB), + }, + //Tags: map[string]*string{ + } + + if s.DiskSizeGB > 0 { + disk.DiskProperties.DiskSizeGB = to.Int32Ptr(s.DiskSizeGB) + } + + f, err := azcli.DisksClient().CreateOrUpdate(ctx, s.ResourceGroup, s.DiskName, disk) + if err == nil 
{ + err = f.WaitForCompletionRef(ctx, azcli.PollClient()) + } + if err != nil { + log.Printf("StepCreateNewDisk.Run: error: %+v", err) + err := fmt.Errorf( + "error creating new disk '%s': %v", diskResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + s.SubscriptionID, + s.ResourceGroup, + s.DiskName) + ui.Say(fmt.Sprintf("Deleting disk '%s'", diskResourceID)) + + f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName) + if err == nil { + err = f.WaitForCompletionRef(context.TODO(), azcli.PollClient()) + } + if err != nil { + log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err) + ui.Error(fmt.Sprintf("Error deleting new disk '%s': %v.", diskResourceID, err)) + } +} From 4d750ddefa7c79365c53dfa4943866df352c6614 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 29 May 2019 00:40:44 +0000 Subject: [PATCH 14/55] Reuse amazon/chroot/step_pre_mount_commands.go --- builder/amazon/chroot/builder.go | 8 +++++++ .../amazon/chroot/step_pre_mount_commands.go | 4 ++-- builder/azure/chroot/builder.go | 22 +++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index f51ae9255..21eeefc13 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -168,6 +168,14 @@ type Config struct { ctx interpolate.Context } +func (c *Config) GetContext() interpolate.Context { + return c.ctx +} + +type interpolateContextProvider interface { + GetContext() interpolate.Context +} + type wrappedCommandTemplate struct { Command string } diff --git 
a/builder/amazon/chroot/step_pre_mount_commands.go b/builder/amazon/chroot/step_pre_mount_commands.go index 9a60256fe..635ee84d0 100644 --- a/builder/amazon/chroot/step_pre_mount_commands.go +++ b/builder/amazon/chroot/step_pre_mount_commands.go @@ -17,7 +17,7 @@ type StepPreMountCommands struct { } func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*Config) + config := state.Get("config").(interpolateContextProvider) device := state.Get("device").(string) ui := state.Get("ui").(packer.Ui) wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) @@ -26,7 +26,7 @@ func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag return multistep.ActionContinue } - ictx := config.ctx + ictx := config.GetContext() ictx.Data = &preMountCommandsData{Device: device} ui.Say("Running device setup commands...") diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index add0a04c6..a91fb3035 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -8,6 +8,7 @@ import ( "runtime" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + amznchroot "github.com/hashicorp/packer/builder/amazon/chroot" azcommon "github.com/hashicorp/packer/builder/azure/common" "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/common" @@ -22,6 +23,9 @@ type Config struct { FromScratch bool `mapstructure:"from_scratch"` + CommandWrapper string `mapstructure:"command_wrapper"` + PreMountCommands []string `mapstructure:"pre_mount_commands"` + OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` @@ -59,6 +63,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs = packer.MultiErrorAppend( errs, errors.New("osdisk_size_gb is required with from_scratch.")) } + if len(b.config.PreMountCommands) == 
0 { + errs = packer.MultiErrorAppend( + errs, errors.New("pre_mount_commands is required with from_scratch.")) + } } if err != nil { @@ -74,11 +82,18 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack var azcli client.AzureClientSet + wrappedCommand := func(command string) (string, error) { + ictx := b.config.ctx + ictx.Data = &struct{ Command string }{Command: command} + return interpolate.Render(b.config.CommandWrapper, &ictx) + } + // Setup the state bag and initial state for the steps state := new(multistep.BasicStateBag) state.Put("config", &b.config) state.Put("hook", hook) state.Put("ui", ui) + state.Put("wrappedCommand", amznchroot.CommandWrapper(wrappedCommand)) info, err := azcli.MetadataClient().GetComputeInfo() if err != nil { @@ -115,6 +130,13 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack }) } + steps = append(steps, + //&StepAttachDisk{}, + &amznchroot.StepPreMountCommands{ + Commands: b.config.PreMountCommands, + }, + ) + // Run! 
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui) b.runner.Run(ctx, state) From d1f8b8545dad133ea772ef44f370a09b866e4a15 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 29 May 2019 18:00:04 +0000 Subject: [PATCH 15/55] Add StepAttachDisk --- builder/azure/chroot/builder.go | 6 ++- builder/azure/chroot/diskattacher_test.go | 2 +- builder/azure/chroot/step_attach_disk.go | 65 +++++++++++++++++++++++ 3 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 builder/azure/chroot/step_attach_disk.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index a91fb3035..4454ffd55 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -131,7 +131,11 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } steps = append(steps, - //&StepAttachDisk{}, + &StepAttachDisk{ // sets 'device' in stateBag + SubscriptionID: info.SubscriptionID, + ResourceGroup: info.ResourceGroupName, + DiskName: osDiskName, + }, &amznchroot.StepPreMountCommands{ Commands: b.config.PreMountCommands, }, diff --git a/builder/azure/chroot/diskattacher_test.go b/builder/azure/chroot/diskattacher_test.go index fbdc359ed..088dbd147 100644 --- a/builder/azure/chroot/diskattacher_test.go +++ b/builder/azure/chroot/diskattacher_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" - "github.com/hashicorp/packer/builder/azure/chroot/client" + "github.com/hashicorp/packer/builder/azure/common/client" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/builder/azure/chroot/step_attach_disk.go b/builder/azure/chroot/step_attach_disk.go new file mode 100644 index 000000000..9f05592b2 --- /dev/null +++ b/builder/azure/chroot/step_attach_disk.go @@ -0,0 +1,65 @@ +package chroot + +import ( + "context" + "fmt" + "github.com/hashicorp/packer/builder/azure/common/client" + 
"github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "log" + "time" +) + +var _ multistep.Step = &StepAttachDisk{} + +type StepAttachDisk struct { + SubscriptionID, ResourceGroup, DiskName string +} + +func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + s.SubscriptionID, + s.ResourceGroup, + s.DiskName) + ui.Say(fmt.Sprintf("Attaching disk '%s'", diskResourceID)) + + da := NewDiskAttacher(azcli) + lun, err := da.AttachDisk(ctx, diskResourceID) + if err != nil { + log.Printf("StepAttachDisk.Run: error: %+v", err) + err := fmt.Errorf( + "error attaching disk '%s': %v", diskResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + ui.Say("Disk attached, waiting for device to show up") + ctx, cancel := context.WithTimeout(ctx, time.Minute*3) // in case is not configured correctly + defer cancel() + device, err := da.WaitForDevice(ctx, lun) + + ui.Say(fmt.Sprintf("Disk available at %q", device)) + state.Put("device", device) + return multistep.ActionContinue +} + +func (s StepAttachDisk) Cleanup(state multistep.StateBag) { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", + s.SubscriptionID, + s.ResourceGroup, + s.DiskName) + ui.Say(fmt.Sprintf("Detaching disk '%s'", diskResourceID)) + + da := NewDiskAttacher(azcli) + err := da.DetachDisk(context.Background(), diskResourceID) + if err != nil { + ui.Error(fmt.Sprintf("error detaching %q: %v", diskResourceID, err)) + } +} From addbdedea966433c674e6664e8a01f641fc0b231 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 18:39:43 
+0000 Subject: [PATCH 16/55] Add StepPostMountCommands --- .../amazon/chroot/step_post_mount_commands.go | 4 +- builder/azure/chroot/builder.go | 22 ++- builder/azure/chroot/step_mount_device.go | 132 ++++++++++++++++++ 3 files changed, 151 insertions(+), 7 deletions(-) create mode 100644 builder/azure/chroot/step_mount_device.go diff --git a/builder/amazon/chroot/step_post_mount_commands.go b/builder/amazon/chroot/step_post_mount_commands.go index 704af428f..c152d409f 100644 --- a/builder/amazon/chroot/step_post_mount_commands.go +++ b/builder/amazon/chroot/step_post_mount_commands.go @@ -19,7 +19,7 @@ type StepPostMountCommands struct { } func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*Config) + config := state.Get("config").(interpolateContextProvider) device := state.Get("device").(string) mountPath := state.Get("mount_path").(string) ui := state.Get("ui").(packer.Ui) @@ -29,7 +29,7 @@ func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBa return multistep.ActionContinue } - ictx := config.ctx + ictx := config.GetContext() ictx.Data = &postMountCommandsData{ Device: device, MountPath: mountPath, diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 4454ffd55..d1ca610c8 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -8,7 +8,7 @@ import ( "runtime" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" - amznchroot "github.com/hashicorp/packer/builder/amazon/chroot" + "github.com/hashicorp/packer/builder/amazon/chroot" azcommon "github.com/hashicorp/packer/builder/azure/common" "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/common" @@ -23,8 +23,12 @@ type Config struct { FromScratch bool `mapstructure:"from_scratch"` - CommandWrapper string `mapstructure:"command_wrapper"` - PreMountCommands []string 
`mapstructure:"pre_mount_commands"` + CommandWrapper string `mapstructure:"command_wrapper"` + MountOptions []string `mapstructure:"mount_options"` + MountPartition string `mapstructure:"mount_partition"` + MountPath string `mapstructure:"mount_path"` + PreMountCommands []string `mapstructure:"pre_mount_commands"` + PostMountCommands []string `mapstructure:"post_mount_commands"` OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` @@ -93,7 +97,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack state.Put("config", &b.config) state.Put("hook", hook) state.Put("ui", ui) - state.Put("wrappedCommand", amznchroot.CommandWrapper(wrappedCommand)) + state.Put("wrappedCommand", chroot.CommandWrapper(wrappedCommand)) info, err := azcli.MetadataClient().GetComputeInfo() if err != nil { @@ -136,9 +140,17 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack ResourceGroup: info.ResourceGroupName, DiskName: osDiskName, }, - &amznchroot.StepPreMountCommands{ + &chroot.StepPreMountCommands{ Commands: b.config.PreMountCommands, }, + &StepMountDevice{ + MountOptions: b.config.MountOptions, + MountPartition: b.config.MountPartition, + MountPath: b.config.MountPath, + }, + &chroot.StepPostMountCommands{ + Commands: b.config.PostMountCommands, + }, ) // Run! 
diff --git a/builder/azure/chroot/step_mount_device.go b/builder/azure/chroot/step_mount_device.go new file mode 100644 index 000000000..7c197fbea --- /dev/null +++ b/builder/azure/chroot/step_mount_device.go @@ -0,0 +1,132 @@ +package chroot + +// mostly borrowed from ./builder/amazon/chroot/step_mount_device.go + +import ( + "bytes" + "context" + "fmt" + "github.com/hashicorp/packer/builder/amazon/chroot" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" + "log" + "os" + "path/filepath" + "strings" +) + +var _ multistep.Step = &StepMountDevice{} + +type StepMountDevice struct { + MountOptions []string + MountPartition string + MountPath string + + mountPath string +} + +func (s *StepMountDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packer.Ui) + device := state.Get("device").(string) + config := state.Get("config").(*Config) + wrappedCommand := state.Get("wrappedCommand").(chroot.CommandWrapper) + + ictx := config.ctx + + ictx.Data = &struct{ Device string }{Device: filepath.Base(device)} + mountPath, err := interpolate.Render(s.MountPath, &ictx) + + if err != nil { + err := fmt.Errorf("error preparing mount directory: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + mountPath, err = filepath.Abs(mountPath) + if err != nil { + err := fmt.Errorf("error preparing mount directory: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + log.Printf("Mount path: %s", mountPath) + + if err := os.MkdirAll(mountPath, 0755); err != nil { + err := fmt.Errorf("error creating mount directory: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + deviceMount := fmt.Sprintf("%s%s", device, s.MountPartition) + + state.Put("deviceMount", deviceMount) + + ui.Say("Mounting the root device...") + 
stderr := new(bytes.Buffer) + + // build mount options from mount_options config, useful for nouuid options + // or other specific device type settings for mount + opts := "" + if len(s.MountOptions) > 0 { + opts = "-o " + strings.Join(s.MountOptions, " -o ") + } + mountCommand, err := wrappedCommand( + fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath)) + if err != nil { + err := fmt.Errorf("error creating mount command: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand) + cmd := chroot.ShellCommand(mountCommand) + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + err := fmt.Errorf( + "error mounting root volume: %s\nStderr: %s", err, stderr.String()) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + // Set the mount path so we remember to unmount it later + s.mountPath = mountPath + state.Put("mount_path", s.mountPath) + state.Put("mount_device_cleanup", s) + + return multistep.ActionContinue +} + +func (s *StepMountDevice) Cleanup(state multistep.StateBag) { + ui := state.Get("ui").(packer.Ui) + if err := s.CleanupFunc(state); err != nil { + ui.Error(err.Error()) + } +} + +func (s *StepMountDevice) CleanupFunc(state multistep.StateBag) error { + if s.mountPath == "" { + return nil + } + + ui := state.Get("ui").(packer.Ui) + wrappedCommand := state.Get("wrappedCommand").(chroot.CommandWrapper) + + ui.Say("Unmounting the root device...") + unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", s.mountPath)) + if err != nil { + return fmt.Errorf("error creating unmount command: %s", err) + } + + cmd := chroot.ShellCommand(unmountCommand) + if err := cmd.Run(); err != nil { + return fmt.Errorf("error unmounting root device: %s", err) + } + + s.mountPath = "" + return nil +} From 9a3e6661b10d0af6176ac97723360cf35f3d448b Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 18:49:35 
+0000 Subject: [PATCH 17/55] Add StepMountExtra --- builder/amazon/chroot/builder.go | 4 +++- builder/amazon/chroot/step_mount_extra.go | 8 +++---- builder/azure/chroot/builder.go | 29 +++++++++++++++-------- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 21eeefc13..0b336c64e 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -400,7 +400,9 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &StepPostMountCommands{ Commands: b.config.PostMountCommands, }, - &StepMountExtra{}, + &StepMountExtra{ + ChrootMounts: b.config.ChrootMounts, + }, &StepCopyFiles{}, &StepChrootProvision{}, &StepEarlyCleanup{}, diff --git a/builder/amazon/chroot/step_mount_extra.go b/builder/amazon/chroot/step_mount_extra.go index 089bf7e75..e1b03abbb 100644 --- a/builder/amazon/chroot/step_mount_extra.go +++ b/builder/amazon/chroot/step_mount_extra.go @@ -17,19 +17,19 @@ import ( // Produces: // mount_extra_cleanup CleanupFunc - To perform early cleanup type StepMountExtra struct { - mounts []string + ChrootMounts [][]string + mounts []string } func (s *StepMountExtra) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*Config) mountPath := state.Get("mount_path").(string) ui := state.Get("ui").(packer.Ui) wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) - s.mounts = make([]string, 0, len(config.ChrootMounts)) + s.mounts = make([]string, 0, len(s.ChrootMounts)) ui.Say("Mounting additional paths within the chroot...") - for _, mountInfo := range config.ChrootMounts { + for _, mountInfo := range s.ChrootMounts { innerPath := mountPath + mountInfo[2] if err := os.MkdirAll(innerPath, 0755); err != nil { diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index d1ca610c8..24a128ce2 100644 --- a/builder/azure/chroot/builder.go +++ 
b/builder/azure/chroot/builder.go @@ -23,12 +23,13 @@ type Config struct { FromScratch bool `mapstructure:"from_scratch"` - CommandWrapper string `mapstructure:"command_wrapper"` - MountOptions []string `mapstructure:"mount_options"` - MountPartition string `mapstructure:"mount_partition"` - MountPath string `mapstructure:"mount_path"` - PreMountCommands []string `mapstructure:"pre_mount_commands"` - PostMountCommands []string `mapstructure:"post_mount_commands"` + CommandWrapper string `mapstructure:"command_wrapper"` + PreMountCommands []string `mapstructure:"pre_mount_commands"` + MountOptions []string `mapstructure:"mount_options"` + MountPartition string `mapstructure:"mount_partition"` + MountPath string `mapstructure:"mount_path"` + PostMountCommands []string `mapstructure:"post_mount_commands"` + ChrootMounts [][]string `mapstructure:"chroot_mounts"` OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` @@ -48,7 +49,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ - // fields to exclude from interpolation + // these fields are interpolated in the steps, + // when more information is available + "command_wrapper", + "post_mount_commands", + "pre_mount_commands", + "mount_path", }, }, }, raws...) 
@@ -65,11 +71,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { if b.config.FromScratch { if b.config.OSDiskSizeGB == 0 { errs = packer.MultiErrorAppend( - errs, errors.New("osdisk_size_gb is required with from_scratch.")) + errs, errors.New("osdisk_size_gb is required with from_scratch")) } if len(b.config.PreMountCommands) == 0 { errs = packer.MultiErrorAppend( - errs, errors.New("pre_mount_commands is required with from_scratch.")) + errs, errors.New("pre_mount_commands is required with from_scratch")) } } @@ -81,7 +87,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { if runtime.GOOS != "linux" { - return nil, errors.New("The azure-chroot builder only works on Linux environments.") + return nil, errors.New("the azure-chroot builder only works on Linux environments") } var azcli client.AzureClientSet @@ -151,6 +157,9 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &chroot.StepPostMountCommands{ Commands: b.config.PostMountCommands, }, + &chroot.StepMountExtra{ + ChrootMounts: b.config.ChrootMounts, + }, ) // Run! 
From 977ca5be1c24bd0f1ff5ec234bca0e1287b9ede5 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 18:55:58 +0000 Subject: [PATCH 18/55] Add StepCopyFiles and StepChrootProvision --- builder/amazon/chroot/builder.go | 4 +++- builder/amazon/chroot/step_copy_files.go | 8 ++++---- builder/azure/chroot/builder.go | 5 +++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 0b336c64e..59b2fbed1 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -403,7 +403,9 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &StepMountExtra{ ChrootMounts: b.config.ChrootMounts, }, - &StepCopyFiles{}, + &StepCopyFiles{ + Files: b.config.CopyFiles, + }, &StepChrootProvision{}, &StepEarlyCleanup{}, &StepSnapshot{}, diff --git a/builder/amazon/chroot/step_copy_files.go b/builder/amazon/chroot/step_copy_files.go index 78625a8d3..af5766386 100644 --- a/builder/amazon/chroot/step_copy_files.go +++ b/builder/amazon/chroot/step_copy_files.go @@ -17,20 +17,20 @@ import ( // copy_files_cleanup CleanupFunc - A function to clean up the copied files // early. 
type StepCopyFiles struct { + Files []string files []string } func (s *StepCopyFiles) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { - config := state.Get("config").(*Config) mountPath := state.Get("mount_path").(string) ui := state.Get("ui").(packer.Ui) wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) stderr := new(bytes.Buffer) - s.files = make([]string, 0, len(config.CopyFiles)) - if len(config.CopyFiles) > 0 { + s.files = make([]string, 0, len(s.Files)) + if len(s.Files) > 0 { ui.Say("Copying files from host to chroot...") - for _, path := range config.CopyFiles { + for _, path := range s.Files { ui.Message(path) chrootPath := filepath.Join(mountPath, path) log.Printf("Copying '%s' to '%s'", path, chrootPath) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 24a128ce2..37f486c0e 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -30,6 +30,7 @@ type Config struct { MountPath string `mapstructure:"mount_path"` PostMountCommands []string `mapstructure:"post_mount_commands"` ChrootMounts [][]string `mapstructure:"chroot_mounts"` + CopyFiles []string `mapstructure:"copy_files"` OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` @@ -160,6 +161,10 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &chroot.StepMountExtra{ ChrootMounts: b.config.ChrootMounts, }, + &chroot.StepCopyFiles{ + Files: b.config.CopyFiles, + }, + &chroot.StepChrootProvision{}, ) // Run! 
From 77b782c5b51b7f9a08b630d47af0392df1187f5d Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 19:01:47 +0000 Subject: [PATCH 19/55] Add StepEarlyCleanup --- builder/azure/chroot/builder.go | 1 + builder/azure/chroot/step_attach_disk.go | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 37f486c0e..b0364ceb2 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -165,6 +165,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack Files: b.config.CopyFiles, }, &chroot.StepChrootProvision{}, + &chroot.StepEarlyCleanup{}, ) // Run! diff --git a/builder/azure/chroot/step_attach_disk.go b/builder/azure/chroot/step_attach_disk.go index 9f05592b2..c090f2e42 100644 --- a/builder/azure/chroot/step_attach_disk.go +++ b/builder/azure/chroot/step_attach_disk.go @@ -44,10 +44,18 @@ func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multi ui.Say(fmt.Sprintf("Disk available at %q", device)) state.Put("device", device) + state.Put("attach_cleanup", s) return multistep.ActionContinue } func (s StepAttachDisk) Cleanup(state multistep.StateBag) { + ui := state.Get("ui").(packer.Ui) + if err := s.CleanupFunc(state); err != nil { + ui.Error(err.Error()) + } +} + +func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) @@ -60,6 +68,7 @@ func (s StepAttachDisk) Cleanup(state multistep.StateBag) { da := NewDiskAttacher(azcli) err := da.DetachDisk(context.Background(), diskResourceID) if err != nil { - ui.Error(fmt.Sprintf("error detaching %q: %v", diskResourceID, err)) + return fmt.Errorf("error detaching %q: %v", diskResourceID, err) } + return nil } From e11a5bdb40847a57a14bd7d4a9e6fe6a016fcf20 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 20:02:25 +0000 Subject: 
[PATCH 20/55] Put os disk id in state --- builder/azure/chroot/builder.go | 64 ++++++++++++-- builder/azure/chroot/step_attach_disk.go | 16 ++-- builder/azure/chroot/step_create_image.go | 101 ++++++++++++++++++++++ 3 files changed, 161 insertions(+), 20 deletions(-) create mode 100644 builder/azure/chroot/step_create_image.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index b0364ceb2..df57330e6 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -7,7 +7,6 @@ import ( "log" "runtime" - "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" "github.com/hashicorp/packer/builder/amazon/chroot" azcommon "github.com/hashicorp/packer/builder/azure/common" "github.com/hashicorp/packer/builder/azure/common/client" @@ -16,6 +15,8 @@ import ( "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" ) type Config struct { @@ -32,8 +33,12 @@ type Config struct { ChrootMounts [][]string `mapstructure:"chroot_mounts"` CopyFiles []string `mapstructure:"copy_files"` - OSDiskSizeGB int32 `mapstructure:"osdisk_size_gb"` - OSDiskStorageAccountType string `mapstructure:"osdisk_storageaccounttype"` + OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` + OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"` + OSDiskCacheType string `mapstructure:"os_disk_cache_type"` + + ImageResourceID string `mapstructure:"image_resource_id"` + ImageOSState string `mapstructure:"image_os_state"` ctx interpolate.Context } @@ -60,11 +65,48 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { }, }, raws...) 
- // defaults + // Defaults + if b.config.ChrootMounts == nil { + b.config.ChrootMounts = make([][]string, 0) + } + + if len(b.config.ChrootMounts) == 0 { + b.config.ChrootMounts = [][]string{ + {"proc", "proc", "/proc"}, + {"sysfs", "sysfs", "/sys"}, + {"bind", "/dev", "/dev"}, + {"devpts", "devpts", "/dev/pts"}, + {"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"}, + } + } + + // set default copy file if we're not giving our own + if b.config.CopyFiles == nil { + if !b.config.FromScratch { + b.config.CopyFiles = []string{"/etc/resolv.conf"} + } + } + + if b.config.CommandWrapper == "" { + b.config.CommandWrapper = "{{.Command}}" + } + + if b.config.MountPath == "" { + b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}" + } + + if b.config.MountPartition == "" { + b.config.MountPartition = "1" + } + if b.config.OSDiskStorageAccountType == "" { b.config.OSDiskStorageAccountType = string(compute.PremiumLRS) } + if b.config.OSDiskCacheType == "" { + b.config.OSDiskCacheType = string(compute.CachingTypesReadOnly) + } + // checks, accumulate any errors or warnings var errs *packer.MultiError var warns []string @@ -91,6 +133,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack return nil, errors.New("the azure-chroot builder only works on Linux environments") } + // todo: instantiate Azure client var azcli client.AzureClientSet wrappedCommand := func(command string) (string, error) { @@ -104,6 +147,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack state.Put("config", &b.config) state.Put("hook", hook) state.Put("ui", ui) + state.Put("azureclient", azcli) state.Put("wrappedCommand", chroot.CommandWrapper(wrappedCommand)) info, err := azcli.MetadataClient().GetComputeInfo() @@ -142,11 +186,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } steps = append(steps, - &StepAttachDisk{ // sets 'device' in stateBag - SubscriptionID: 
info.SubscriptionID, - ResourceGroup: info.ResourceGroupName, - DiskName: osDiskName, - }, + &StepAttachDisk{}, // uses os_disk_resource_id and sets 'device' in stateBag &chroot.StepPreMountCommands{ Commands: b.config.PreMountCommands, }, @@ -166,6 +206,12 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack }, &chroot.StepChrootProvision{}, &chroot.StepEarlyCleanup{}, + &StepCreateImage{ + ImageResourceID: b.config.ImageResourceID, + ImageOSState: b.config.ImageOSState, + OSDiskCacheType: b.config.OSDiskCacheType, + OSDiskStorageAccountType: b.config.OSDiskStorageAccountType, + }, ) // Run! diff --git a/builder/azure/chroot/step_attach_disk.go b/builder/azure/chroot/step_attach_disk.go index c090f2e42..87f4e5c9b 100644 --- a/builder/azure/chroot/step_attach_disk.go +++ b/builder/azure/chroot/step_attach_disk.go @@ -3,27 +3,24 @@ package chroot import ( "context" "fmt" + "log" + "time" + "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" - "log" - "time" ) var _ multistep.Step = &StepAttachDisk{} type StepAttachDisk struct { - SubscriptionID, ResourceGroup, DiskName string } func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) + diskResourceID := state.Get("os_disk_resource_id").(string) - diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", - s.SubscriptionID, - s.ResourceGroup, - s.DiskName) ui.Say(fmt.Sprintf("Attaching disk '%s'", diskResourceID)) da := NewDiskAttacher(azcli) @@ -58,11 +55,8 @@ func (s StepAttachDisk) Cleanup(state multistep.StateBag) { func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) + diskResourceID := 
state.Get("os_disk_resource_id").(string) - diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", - s.SubscriptionID, - s.ResourceGroup, - s.DiskName) ui.Say(fmt.Sprintf("Detaching disk '%s'", diskResourceID)) da := NewDiskAttacher(azcli) diff --git a/builder/azure/chroot/step_create_image.go b/builder/azure/chroot/step_create_image.go new file mode 100644 index 000000000..ad05b85ac --- /dev/null +++ b/builder/azure/chroot/step_create_image.go @@ -0,0 +1,101 @@ +package chroot + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "log" +) + +var _ multistep.Step = &StepCreateImage{} + +type StepCreateImage struct { + ImageResourceID string + ImageOSState string + OSDiskStorageAccountType string + OSDiskCacheType string + + imageResource azure.Resource +} + +func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + diskResourceID := state.Get("os_disk_resource_id").(string) + + ui.Say(fmt.Sprintf("Creating image %s\n using %s for os disk.", + s.ImageResourceID, + diskResourceID)) + + var err error + s.imageResource, err = azure.ParseResourceID(s.ImageResourceID) + + if err != nil { + log.Printf("StepCreateImage.Run: error: %+v", err) + err := fmt.Errorf( + "error parsing image resource id '%s': %v", s.ImageResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + image := compute.Image{ + ImageProperties: &compute.ImageProperties{ + StorageProfile: &compute.ImageStorageProfile{ + OsDisk: &compute.ImageOSDisk{ + OsType: "Linux", + OsState: 
compute.OperatingSystemStateTypes(s.ImageOSState), + ManagedDisk: &compute.SubResource{ + ID: &diskResourceID, + }, + Caching: compute.CachingTypes(s.OSDiskCacheType), + StorageAccountType: compute.StorageAccountTypes(s.OSDiskStorageAccountType), + }, + // DataDisks: nil, + // ZoneResilient: nil, + }, + }, + // Tags: nil, + } + f, err := azcli.ImagesClient().CreateOrUpdate( + ctx, + s.imageResource.ResourceGroup, + s.imageResource.ResourceName, + image) + if err == nil { + err = f.WaitForCompletionRef(ctx, azcli.PollClient()) + } + if err != nil { + log.Printf("StepCreateImage.Run: error: %+v", err) + err := fmt.Errorf( + "error creating image '%s': %v", s.ImageResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *StepCreateImage) Cleanup(state multistep.StateBag) { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + ctx := context.Background() + f, err := azcli.ImagesClient().Delete( + ctx, + s.imageResource.ResourceGroup, + s.imageResource.ResourceName) + if err == nil { + err = f.WaitForCompletionRef(ctx, azcli.PollClient()) + } + if err != nil { + log.Printf("StepCreateImage.Cleanup: error: %+v", err) + ui.Error(fmt.Sprintf( + "error deleting image '%s': %v", s.ImageResourceID, err)) + } +} From b4d0865548e2388bd6c8cf4d7e3c232743b1bc07 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 31 May 2019 22:15:56 +0000 Subject: [PATCH 21/55] Update azure-chroot builder --- builder/azure/chroot/builder.go | 79 ++++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index df57330e6..d5446fa4d 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -22,6 +22,8 @@ import ( type Config struct { common.PackerConfig `mapstructure:",squash"` + ClientConfig client.Config `mapstructure:",squash"` + 
FromScratch bool `mapstructure:"from_scratch"` CommandWrapper string `mapstructure:"command_wrapper"` @@ -66,6 +68,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { }, raws...) // Defaults + err = b.config.ClientConfig.SetDefaultValues() + if err != nil { + return nil, err + } + if b.config.ChrootMounts == nil { b.config.ChrootMounts = make([][]string, 0) } @@ -92,7 +99,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.MountPath == "" { - b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}" + b.config.MountPath = "/mnt/packer-azure-chroot-disks/{{.Device}}" } if b.config.MountPartition == "" { @@ -107,6 +114,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.OSDiskCacheType = string(compute.CachingTypesReadOnly) } + if b.config.ImageOSState == "" { + b.config.ImageOSState = string(compute.Generalized) + + } + // checks, accumulate any errors or warnings var errs *packer.MultiError var warns []string @@ -114,18 +126,62 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { if b.config.FromScratch { if b.config.OSDiskSizeGB == 0 { errs = packer.MultiErrorAppend( - errs, errors.New("osdisk_size_gb is required with from_scratch")) + errs, errors.New("os_disk_size_gb is required with from_scratch")) } if len(b.config.PreMountCommands) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("pre_mount_commands is required with from_scratch")) } + } else { + errs = packer.MultiErrorAppend(errors.New("only 'from_scratch'=true is supported right now")) } - if err != nil { - return nil, err + if err := checkOSState(b.config.ImageOSState); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err)) } - return warns, errs + if err := checkDiskCacheType(b.config.OSDiskCacheType); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_cache_type: %v", err)) + } + if err := 
checkStorageAccountType(b.config.OSDiskStorageAccountType); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_storage_account_type: %v", err)) + } + + if errs != nil { + return warns, errs + } + + packer.LogSecretFilter.Set(b.config.ClientConfig.ClientSecret, b.config.ClientConfig.ClientJWT) + return warns, nil +} + +func checkOSState(s string) interface{} { + for _, v := range compute.PossibleOperatingSystemStateTypesValues() { + if compute.OperatingSystemStateTypes(s) == v { + return nil + } + } + return fmt.Errorf("%q is not a valid value (%v)", + s, compute.PossibleOperatingSystemStateTypesValues()) +} + +func checkDiskCacheType(s string) interface{} { + for _, v := range compute.PossibleCachingTypesValues() { + if compute.CachingTypes(s) == v { + return nil + } + } + return fmt.Errorf("%q is not a valid value (%v)", + s, compute.PossibleCachingTypesValues()) +} + +func checkStorageAccountType(s string) interface{} { + for _, v := range compute.PossibleStorageAccountTypesValues() { + if compute.StorageAccountTypes(s) == v { + return nil + } + } + return fmt.Errorf("%q is not a valid value (%v)", + s, compute.PossibleStorageAccountTypesValues()) } func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { @@ -133,8 +189,14 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack return nil, errors.New("the azure-chroot builder only works on Linux environments") } - // todo: instantiate Azure client - var azcli client.AzureClientSet + err := b.config.ClientConfig.FillParameters() + if err != nil { + return nil, fmt.Errorf("error setting Azure client defaults: %v", err) + } + azcli, err := client.New(b.config.ClientConfig, ui.Say) + if err != nil { + return nil, fmt.Errorf("error creating Azure client: %v", err) + } wrappedCommand := func(command string) (string, error) { ictx := b.config.ctx @@ -164,9 +226,6 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook 
packer.Hook) (pack osDiskName := "PackerBuiltOsDisk" state.Put("instance", info) - if err != nil { - return nil, err - } // Build the steps var steps []multistep.Step From 7297d74c98f9340f63fef31d9f8ce19660dafcc7 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Sun, 2 Jun 2019 19:26:03 +0000 Subject: [PATCH 22/55] Add azure-chroot builder plugin --- command/plugin.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/command/plugin.go b/command/plugin.go index 81dcc4ec1..277685a9b 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -20,6 +20,7 @@ import ( amazonebsvolumebuilder "github.com/hashicorp/packer/builder/amazon/ebsvolume" amazoninstancebuilder "github.com/hashicorp/packer/builder/amazon/instance" azurearmbuilder "github.com/hashicorp/packer/builder/azure/arm" + azurechrootbuilder "github.com/hashicorp/packer/builder/azure/chroot" cloudstackbuilder "github.com/hashicorp/packer/builder/cloudstack" digitaloceanbuilder "github.com/hashicorp/packer/builder/digitalocean" dockerbuilder "github.com/hashicorp/packer/builder/docker" @@ -109,6 +110,7 @@ var Builders = map[string]packer.Builder{ "amazon-ebsvolume": new(amazonebsvolumebuilder.Builder), "amazon-instance": new(amazoninstancebuilder.Builder), "azure-arm": new(azurearmbuilder.Builder), + "azure-chroot": new(azurechrootbuilder.Builder), "cloudstack": new(cloudstackbuilder.Builder), "digitalocean": new(digitaloceanbuilder.Builder), "docker": new(dockerbuilder.Builder), From b9b5bb2951864d05c1d03448fc92c3c82c345716 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Sun, 2 Jun 2019 19:29:45 +0000 Subject: [PATCH 23/55] Use DiskStorageAccountTypes --- builder/azure/chroot/builder.go | 6 +++--- builder/azure/chroot/step_create_new_disk.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index d5446fa4d..8023db7f5 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -175,13 +175,13 @@ func 
checkDiskCacheType(s string) interface{} { } func checkStorageAccountType(s string) interface{} { - for _, v := range compute.PossibleStorageAccountTypesValues() { - if compute.StorageAccountTypes(s) == v { + for _, v := range compute.PossibleDiskStorageAccountTypesValues() { + if compute.DiskStorageAccountTypes(s) == v { return nil } } return fmt.Errorf("%q is not a valid value (%v)", - s, compute.PossibleStorageAccountTypesValues()) + s, compute.PossibleDiskStorageAccountTypesValues()) } func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index 098adb25f..c2eb15ca3 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -33,7 +33,7 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu disk := compute.Disk{ Sku: &compute.DiskSku{ - Name: "", + Name: compute.DiskStorageAccountTypes(s.DiskStorageAccountType), }, //Zones: nil, DiskProperties: &compute.DiskProperties{ From b5401d552a3a698f8e9067c5b798ba539cdfbe47 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 3 Jun 2019 05:27:33 +0000 Subject: [PATCH 24/55] Make from_scratch work --- builder/azure/chroot/builder.go | 66 ++++++-- builder/azure/chroot/diskattacher.go | 159 ++++++++++++------- builder/azure/chroot/step_attach_disk.go | 35 ++-- builder/azure/chroot/step_create_image.go | 27 +--- builder/azure/chroot/step_create_new_disk.go | 14 +- 5 files changed, 191 insertions(+), 110 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 8023db7f5..b2ac901f9 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "github.com/Azure/go-autorest/autorest/azure" "log" "runtime" + "strings" "github.com/hashicorp/packer/builder/amazon/chroot" azcommon 
"github.com/hashicorp/packer/builder/azure/common" @@ -35,16 +37,22 @@ type Config struct { ChrootMounts [][]string `mapstructure:"chroot_mounts"` CopyFiles []string `mapstructure:"copy_files"` + TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"` OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"` OSDiskCacheType string `mapstructure:"os_disk_cache_type"` - ImageResourceID string `mapstructure:"image_resource_id"` - ImageOSState string `mapstructure:"image_os_state"` + ImageResourceID string `mapstructure:"image_resource_id"` + ImageOSState string `mapstructure:"image_os_state"` + ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"` ctx interpolate.Context } +func (c *Config) GetContext() interpolate.Context { + return c.ctx +} + type Builder struct { config Config runner multistep.Runner @@ -106,6 +114,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.MountPartition = "1" } + if b.config.TemporaryOSDiskName == "" { + b.config.TemporaryOSDiskName = "PackerTemp-{{timestamp}}" + } + if b.config.OSDiskStorageAccountType == "" { b.config.OSDiskStorageAccountType = string(compute.PremiumLRS) } @@ -116,7 +128,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { if b.config.ImageOSState == "" { b.config.ImageOSState = string(compute.Generalized) + } + if b.config.ImageHyperVGeneration == "" { + b.config.ImageHyperVGeneration = string(compute.V1) } // checks, accumulate any errors or warnings @@ -136,16 +151,34 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs = packer.MultiErrorAppend(errors.New("only 'from_scratch'=true is supported right now")) } - if err := checkOSState(b.config.ImageOSState); err != nil { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err)) - } if err := checkDiskCacheType(b.config.OSDiskCacheType); err != nil { errs = 
packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_cache_type: %v", err)) } + if err := checkStorageAccountType(b.config.OSDiskStorageAccountType); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_disk_storage_account_type: %v", err)) } + if b.config.ImageResourceID == "" { + errs = packer.MultiErrorAppend(errs, errors.New("image_resource_id is required")) + } else { + r, err := azure.ParseResourceID(b.config.ImageResourceID) + if err != nil || + !strings.EqualFold(r.Provider, "Microsoft.Compute") || + !strings.EqualFold(r.ResourceType, "images") { + errs = packer.MultiErrorAppend(fmt.Errorf( + "image_resource_id: %q is not a valid image resource id", b.config.ImageResourceID)) + } + } + + if err := checkOSState(b.config.ImageOSState); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err)) + } + + if err := checkHyperVGeneration(b.config.ImageHyperVGeneration); err != nil { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_hyperv_generation: %v", err)) + } + if errs != nil { return warns, errs } @@ -160,7 +193,7 @@ func checkOSState(s string) interface{} { return nil } } - return fmt.Errorf("%q is not a valid value (%v)", + return fmt.Errorf("%q is not a valid value %v", s, compute.PossibleOperatingSystemStateTypesValues()) } @@ -170,7 +203,7 @@ func checkDiskCacheType(s string) interface{} { return nil } } - return fmt.Errorf("%q is not a valid value (%v)", + return fmt.Errorf("%q is not a valid value %v", s, compute.PossibleCachingTypesValues()) } @@ -180,10 +213,20 @@ func checkStorageAccountType(s string) interface{} { return nil } } - return fmt.Errorf("%q is not a valid value (%v)", + return fmt.Errorf("%q is not a valid value %v", s, compute.PossibleDiskStorageAccountTypesValues()) } +func checkHyperVGeneration(s string) interface{} { + for _, v := range compute.PossibleHyperVGenerationValues() { + if compute.HyperVGeneration(s) == v { + return nil + } + } + return fmt.Errorf("%q is not a valid value 
%v", + s, compute.PossibleHyperVGenerationValues()) +} + func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) { if runtime.GOOS != "linux" { return nil, errors.New("the azure-chroot builder only works on Linux environments") @@ -223,8 +266,6 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack return nil, err } - osDiskName := "PackerBuiltOsDisk" - state.Put("instance", info) // Build the steps @@ -238,9 +279,11 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &StepCreateNewDisk{ SubscriptionID: info.SubscriptionID, ResourceGroup: info.ResourceGroupName, - DiskName: osDiskName, + DiskName: b.config.TemporaryOSDiskName, DiskSizeGB: b.config.OSDiskSizeGB, DiskStorageAccountType: b.config.OSDiskStorageAccountType, + HyperVGeneration: b.config.ImageHyperVGeneration, + Location: info.Location, }) } @@ -270,6 +313,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack ImageOSState: b.config.ImageOSState, OSDiskCacheType: b.config.OSDiskCacheType, OSDiskStorageAccountType: b.config.OSDiskStorageAccountType, + Location: info.Location, }, ) diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go index 2f21ebf45..97ba4174c 100644 --- a/builder/azure/chroot/diskattacher.go +++ b/builder/azure/chroot/diskattacher.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "log" "os" "path/filepath" "strings" + "syscall" "time" "github.com/hashicorp/packer/builder/azure/common/client" @@ -16,38 +18,43 @@ import ( "github.com/Azure/go-autorest/autorest/to" ) -type VirtualMachinesClientAPI interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) ( - result compute.VirtualMachinesCreateOrUpdateFuture, err error) - Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) ( - result 
compute.VirtualMachine, err error) -} - type DiskAttacher interface { AttachDisk(ctx context.Context, disk string) (lun int32, err error) DetachDisk(ctx context.Context, disk string) (err error) WaitForDevice(ctx context.Context, i int32) (device string, err error) + DiskPathForLun(lun int32) string } func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher { - return diskAttacher{azureClient} + return &diskAttacher{ + azcli: azureClient, + } } type diskAttacher struct { azcli client.AzureClientSet + + vm *client.ComputeInfo // store info about this VM so that we don't have to ask metadata service on every call } -func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device string, err error) { - path := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", i) +func (diskAttacher) DiskPathForLun(lun int32) string { + return fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", lun) +} + +func (da diskAttacher) WaitForDevice(ctx context.Context, lun int32) (device string, err error) { + path := da.DiskPathForLun(lun) for { - l, err := os.Readlink(path) + link, err := os.Readlink(path) if err == nil { - return filepath.Abs("/dev/disk/azure/scsi1/" + l) + return filepath.Abs("/dev/disk/azure/scsi1/" + link) } if err != nil && err != os.ErrNotExist { - return "", err + if pe, ok := err.(*os.PathError); ok && pe.Err != syscall.ENOENT { + return "", err + } } + select { case <-time.After(100 * time.Millisecond): // continue @@ -57,13 +64,14 @@ func (da diskAttacher) WaitForDevice(ctx context.Context, i int32) (device strin } } -func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error { +func (da *diskAttacher) DetachDisk(ctx context.Context, diskID string) error { + log.Println("Fetching list of disks currently attached to VM") currentDisks, err := da.getDisks(ctx) if err != nil { return err } - // copy all disks to new array that not match diskID + log.Printf("Removing %q from list of disks currently attached to VM", diskID) newDisks := 
[]compute.DataDisk{} for _, disk := range currentDisks { if disk.ManagedDisk != nil && @@ -75,71 +83,92 @@ func (da diskAttacher) DetachDisk(ctx context.Context, diskID string) error { return DiskNotFoundError } - return da.setDisks(ctx, newDisks) + log.Println("Updating new list of disks attached to VM") + err = da.setDisks(ctx, newDisks) + if err != nil { + return err + } + + // waiting for VM update to finish takes way to long + for { // loop until disk is not attached, timeout or error + list, err := da.getDisks(ctx) + if err != nil { + return err + } + if findDiskInList(list, diskID) == nil { + log.Println("Disk is no longer in VM model, assuming detached") + return nil + } + + select { + case <-time.After(time.Second): //continue + case <-ctx.Done(): + return ctx.Err() + } + } } var DiskNotFoundError = errors.New("Disk not found") -func (da diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) { +func (da *diskAttacher) AttachDisk(ctx context.Context, diskID string) (int32, error) { dataDisks, err := da.getDisks(ctx) if err != nil { return -1, err } // check to see if disk is already attached, remember lun if found + if disk := findDiskInList(dataDisks, diskID); disk != nil { + // disk is already attached, just take this lun + if disk.Lun == nil { + return -1, errors.New("disk is attached, but lun was not set in VM model (possibly an error in the Azure APIs)") + } + return to.Int32(disk.Lun), nil + } + + // disk was not found on VM, go and actually attach it + var lun int32 = -1 - for _, disk := range dataDisks { - if disk.ManagedDisk != nil && - strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) { - // disk is already attached, just take this lun - if disk.Lun != nil { - lun = to.Int32(disk.Lun) - break +findFreeLun: + for lun = 0; lun < 64; lun++ { + for _, v := range dataDisks { + if to.Int32(v.Lun) == lun { + continue findFreeLun } } + // no datadisk is using this lun + break } - if lun == -1 { - // disk was not found on VM, 
go and actually attach it + // append new data disk to collection + dataDisks = append(dataDisks, compute.DataDisk{ + CreateOption: compute.DiskCreateOptionTypesAttach, + ManagedDisk: &compute.ManagedDiskParameters{ + ID: to.StringPtr(diskID), + }, + Lun: to.Int32Ptr(lun), + }) - findFreeLun: - for lun = 0; lun < 64; lun++ { - for _, v := range dataDisks { - if to.Int32(v.Lun) == lun { - continue findFreeLun - } - } - // no datadisk is using this lun - break - } - - // append new data disk to collection - dataDisks = append(dataDisks, compute.DataDisk{ - CreateOption: compute.DiskCreateOptionTypesAttach, - ManagedDisk: &compute.ManagedDiskParameters{ - ID: to.StringPtr(diskID), - }, - Lun: to.Int32Ptr(lun), - }) - - // prepare resource object for update operation - err = da.setDisks(ctx, dataDisks) - if err != nil { - return -1, err - } + // prepare resource object for update operation + err = da.setDisks(ctx, dataDisks) + if err != nil { + return -1, err } + return lun, nil } -func (da diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) { +func (da *diskAttacher) getThisVM(ctx context.Context) (compute.VirtualMachine, error) { // getting resource info for this VM - vm, err := da.azcli.MetadataClient().GetComputeInfo() - if err != nil { - return compute.VirtualMachine{}, err + if da.vm == nil { + vm, err := da.azcli.MetadataClient().GetComputeInfo() + if err != nil { + return compute.VirtualMachine{}, err + } + da.vm = vm } // retrieve actual VM - vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, vm.ResourceGroupName, vm.Name, "") + vmResource, err := da.azcli.VirtualMachinesClient().Get(ctx, da.vm.ResourceGroupName, da.vm.Name, "") if err != nil { return compute.VirtualMachine{}, err } @@ -173,10 +202,18 @@ func (da diskAttacher) setDisks(ctx context.Context, disks []compute.DataDisk) e vmResource.StorageProfile.DataDisks = &disks vmResource.Resources = nil - // update the VM resource, attaching disk - f, err := 
da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource) - if err == nil { - err = f.WaitForCompletionRef(ctx, da.azcli.PollClient()) - } + // update the VM resource, attach disk + _, err = da.azcli.VirtualMachinesClient().CreateOrUpdate(ctx, id.ResourceGroup, id.ResourceName, vmResource) + return err } + +func findDiskInList(list []compute.DataDisk, diskID string) *compute.DataDisk { + for _, disk := range list { + if disk.ManagedDisk != nil && + strings.EqualFold(to.String(disk.ManagedDisk.ID), diskID) { + return &disk + } + } + return nil +} diff --git a/builder/azure/chroot/step_attach_disk.go b/builder/azure/chroot/step_attach_disk.go index 87f4e5c9b..286c01af1 100644 --- a/builder/azure/chroot/step_attach_disk.go +++ b/builder/azure/chroot/step_attach_disk.go @@ -14,9 +14,10 @@ import ( var _ multistep.Step = &StepAttachDisk{} type StepAttachDisk struct { + attached bool } -func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { +func (s *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) diskResourceID := state.Get("os_disk_resource_id").(string) @@ -38,14 +39,23 @@ func (s StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multi ctx, cancel := context.WithTimeout(ctx, time.Minute*3) // in case is not configured correctly defer cancel() device, err := da.WaitForDevice(ctx, lun) + if err != nil { + log.Printf("StepAttachDisk.Run: error: %+v", err) + err := fmt.Errorf( + "error attaching disk '%s': %v", diskResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } ui.Say(fmt.Sprintf("Disk available at %q", device)) + s.attached = true state.Put("device", device) state.Put("attach_cleanup", s) return multistep.ActionContinue } -func (s StepAttachDisk) Cleanup(state multistep.StateBag) { 
+func (s *StepAttachDisk) Cleanup(state multistep.StateBag) { ui := state.Get("ui").(packer.Ui) if err := s.CleanupFunc(state); err != nil { ui.Error(err.Error()) @@ -53,16 +63,21 @@ func (s StepAttachDisk) Cleanup(state multistep.StateBag) { } func (s *StepAttachDisk) CleanupFunc(state multistep.StateBag) error { - azcli := state.Get("azureclient").(client.AzureClientSet) - ui := state.Get("ui").(packer.Ui) - diskResourceID := state.Get("os_disk_resource_id").(string) - ui.Say(fmt.Sprintf("Detaching disk '%s'", diskResourceID)) + if s.attached { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + diskResourceID := state.Get("os_disk_resource_id").(string) - da := NewDiskAttacher(azcli) - err := da.DetachDisk(context.Background(), diskResourceID) - if err != nil { - return fmt.Errorf("error detaching %q: %v", diskResourceID, err) + ui.Say(fmt.Sprintf("Detaching disk '%s'", diskResourceID)) + + da := NewDiskAttacher(azcli) + err := da.DetachDisk(context.Background(), diskResourceID) + if err != nil { + return fmt.Errorf("error detaching %q: %v", diskResourceID, err) + } + s.attached = false } + return nil } diff --git a/builder/azure/chroot/step_create_image.go b/builder/azure/chroot/step_create_image.go index ad05b85ac..bd869470c 100644 --- a/builder/azure/chroot/step_create_image.go +++ b/builder/azure/chroot/step_create_image.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" @@ -18,6 +19,7 @@ type StepCreateImage struct { ImageOSState string OSDiskStorageAccountType string OSDiskCacheType string + Location string imageResource azure.Resource } @@ -44,16 +46,14 @@ func (s *StepCreateImage) Run(ctx context.Context, state 
multistep.StateBag) mul } image := compute.Image{ + Location: to.StringPtr(s.Location), ImageProperties: &compute.ImageProperties{ StorageProfile: &compute.ImageStorageProfile{ OsDisk: &compute.ImageOSDisk{ - OsType: "Linux", OsState: compute.OperatingSystemStateTypes(s.ImageOSState), ManagedDisk: &compute.SubResource{ ID: &diskResourceID, }, - Caching: compute.CachingTypes(s.OSDiskCacheType), - StorageAccountType: compute.StorageAccountTypes(s.OSDiskStorageAccountType), }, // DataDisks: nil, // ZoneResilient: nil, @@ -67,6 +67,7 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) mul s.imageResource.ResourceName, image) if err == nil { + log.Println("Image creation in process...") err = f.WaitForCompletionRef(ctx, azcli.PollClient()) } if err != nil { @@ -77,25 +78,9 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) mul ui.Error(err.Error()) return multistep.ActionHalt } + log.Printf("Image creation complete: %s", f.Status()) return multistep.ActionContinue } -func (s *StepCreateImage) Cleanup(state multistep.StateBag) { - azcli := state.Get("azureclient").(client.AzureClientSet) - ui := state.Get("ui").(packer.Ui) - - ctx := context.Background() - f, err := azcli.ImagesClient().Delete( - ctx, - s.imageResource.ResourceGroup, - s.imageResource.ResourceName) - if err == nil { - err = f.WaitForCompletionRef(ctx, azcli.PollClient()) - } - if err != nil { - log.Printf("StepCreateImage.Cleanup: error: %+v", err) - ui.Error(fmt.Sprintf( - "error deleting image '%s': %v", s.ImageResourceID, err)) - } -} +func (*StepCreateImage) Cleanup(bag multistep.StateBag) {} // this is the final artifact, don't delete diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index c2eb15ca3..8187bb7dc 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -18,6 +18,8 @@ type StepCreateNewDisk struct { SubscriptionID, 
ResourceGroup, DiskName string DiskSizeGB int32 // optional, ignored if 0 DiskStorageAccountType string // from compute.DiskStorageAccountTypes + HyperVGeneration string + Location string } func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { @@ -32,13 +34,14 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu ui.Say(fmt.Sprintf("Creating disk '%s'", diskResourceID)) disk := compute.Disk{ + Location: to.StringPtr(s.Location), Sku: &compute.DiskSku{ Name: compute.DiskStorageAccountTypes(s.DiskStorageAccountType), }, //Zones: nil, DiskProperties: &compute.DiskProperties{ - OsType: "", - HyperVGeneration: "", + OsType: "Linux", + HyperVGeneration: compute.HyperVGeneration(s.HyperVGeneration), CreationData: &compute.CreationData{ CreateOption: compute.Empty, }, @@ -70,11 +73,8 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) + diskResourceID := state.Get("os_disk_resource_id") - diskResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s", - s.SubscriptionID, - s.ResourceGroup, - s.DiskName) ui.Say(fmt.Sprintf("Deleting disk '%s'", diskResourceID)) f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName) @@ -83,6 +83,6 @@ func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) { } if err != nil { log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err) - ui.Error(fmt.Sprintf("Error deleting new disk '%s': %v.", diskResourceID, err)) + ui.Error(fmt.Sprintf("error deleting new disk '%s': %v.", diskResourceID, err)) } } From bbac79f0a428a483687ddf94d45aa3cbcee29573 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 3 Jun 2019 08:33:31 +0000 Subject: [PATCH 25/55] Make PIR images work --- builder/azure/chroot/builder.go | 57 
++++++++++++++++---- builder/azure/chroot/step_create_image.go | 3 ++ builder/azure/chroot/step_create_new_disk.go | 18 +++++-- 3 files changed, 65 insertions(+), 13 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index b2ac901f9..abc75d677 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" "log" "runtime" "strings" @@ -26,7 +27,8 @@ type Config struct { ClientConfig client.Config `mapstructure:",squash"` - FromScratch bool `mapstructure:"from_scratch"` + FromScratch bool `mapstructure:"from_scratch"` + Source string `mapstructure:"source"` CommandWrapper string `mapstructure:"command_wrapper"` PreMountCommands []string `mapstructure:"pre_mount_commands"` @@ -75,6 +77,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { }, }, raws...) + var errs *packer.MultiError + var warns []string + // Defaults err = b.config.ClientConfig.SetDefaultValues() if err != nil { @@ -115,7 +120,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.TemporaryOSDiskName == "" { - b.config.TemporaryOSDiskName = "PackerTemp-{{timestamp}}" + + if def, err := interpolate.Render("PackerTemp-{{timestamp}}", &b.config.ctx); err == nil { + b.config.TemporaryOSDiskName = def + } else { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("unable to render temporary disk name: %s", err)) + } } if b.config.OSDiskStorageAccountType == "" { @@ -135,10 +145,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } // checks, accumulate any errors or warnings - var errs *packer.MultiError - var warns []string if b.config.FromScratch { + if b.config.Source != "" { + errs = packer.MultiErrorAppend( + errs, errors.New("source cannot be specified when building from_scratch")) + } if b.config.OSDiskSizeGB == 0 { errs = 
packer.MultiErrorAppend( errs, errors.New("os_disk_size_gb is required with from_scratch")) @@ -148,7 +160,12 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { errs, errors.New("pre_mount_commands is required with from_scratch")) } } else { - errs = packer.MultiErrorAppend(errors.New("only 'from_scratch'=true is supported right now")) + if _, err := client.ParsePlatformImageURN(b.config.Source); err == nil { + log.Println("Source is platform image:", b.config.Source) + } else { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("source: %q is not a valid platform image specifier", b.config.Source)) + } } if err := checkDiskCacheType(b.config.OSDiskCacheType); err != nil { @@ -271,10 +288,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack // Build the steps var steps []multistep.Step - if !b.config.FromScratch { - panic("Only from_scratch is currently implemented") - // create disk from PIR / managed image (warn on non-linux images) - } else { + if b.config.FromScratch { steps = append(steps, &StepCreateNewDisk{ SubscriptionID: info.SubscriptionID, @@ -285,6 +299,31 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack HyperVGeneration: b.config.ImageHyperVGeneration, Location: info.Location, }) + } else { + if pi, err := client.ParsePlatformImageURN(b.config.Source); err == nil { + if strings.EqualFold(pi.Version, "latest") { + + vmi, err := azcli.VirtualMachineImagesClient().GetLatest(ctx, pi.Publisher, pi.Offer, pi.Sku, info.Location) + if err != nil { + return nil, fmt.Errorf("error retieving latest version of %q: %v", b.config.Source, err) + } + pi.Version = to.String(vmi.Name) + log.Println("Resolved latest version of source image:", pi.Version) + } + steps = append(steps, + &StepCreateNewDisk{ + SubscriptionID: info.SubscriptionID, + ResourceGroup: info.ResourceGroupName, + DiskName: b.config.TemporaryOSDiskName, + DiskSizeGB: b.config.OSDiskSizeGB, + 
DiskStorageAccountType: b.config.OSDiskStorageAccountType, + HyperVGeneration: b.config.ImageHyperVGeneration, + Location: info.Location, + PlatformImage: pi, + }) + } else { + panic("Unknown image source: " + b.config.Source) + } } steps = append(steps, diff --git a/builder/azure/chroot/step_create_image.go b/builder/azure/chroot/step_create_image.go index bd869470c..4ae90d2fe 100644 --- a/builder/azure/chroot/step_create_image.go +++ b/builder/azure/chroot/step_create_image.go @@ -51,9 +51,12 @@ func (s *StepCreateImage) Run(ctx context.Context, state multistep.StateBag) mul StorageProfile: &compute.ImageStorageProfile{ OsDisk: &compute.ImageOSDisk{ OsState: compute.OperatingSystemStateTypes(s.ImageOSState), + OsType: compute.Linux, ManagedDisk: &compute.SubResource{ ID: &diskResourceID, }, + StorageAccountType: compute.StorageAccountTypes(s.OSDiskStorageAccountType), + Caching: compute.CachingTypes(s.OSDiskCacheType), }, // DataDisks: nil, // ZoneResilient: nil, diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index 8187bb7dc..6873c827a 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -20,6 +20,7 @@ type StepCreateNewDisk struct { DiskStorageAccountType string // from compute.DiskStorageAccountTypes HyperVGeneration string Location string + PlatformImage *client.PlatformImage } func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { @@ -42,10 +43,8 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu DiskProperties: &compute.DiskProperties{ OsType: "Linux", HyperVGeneration: compute.HyperVGeneration(s.HyperVGeneration), - CreationData: &compute.CreationData{ - CreateOption: compute.Empty, - }, - DiskSizeGB: to.Int32Ptr(s.DiskSizeGB), + CreationData: &compute.CreationData{}, + DiskSizeGB: to.Int32Ptr(s.DiskSizeGB), }, //Tags: map[string]*string{ } @@ -54,6 +53,17 @@ func 
(s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu disk.DiskProperties.DiskSizeGB = to.Int32Ptr(s.DiskSizeGB) } + if s.PlatformImage == nil { + disk.CreationData.CreateOption = compute.Empty + } else { + disk.CreationData.CreateOption = compute.FromImage + disk.CreationData.ImageReference = &compute.ImageDiskReference{ + ID: to.StringPtr(fmt.Sprintf( + "/subscriptions/%s/providers/Microsoft.Compute/locations/%s/publishers/%s/artifacttypes/vmimage/offers/%s/skus/%s/versions/%s", + s.SubscriptionID, s.Location, s.PlatformImage.Publisher, s.PlatformImage.Offer, s.PlatformImage.Sku, s.PlatformImage.Version)), + } + } + f, err := azcli.DisksClient().CreateOrUpdate(ctx, s.ResourceGroup, s.DiskName, disk) if err == nil { err = f.WaitForCompletionRef(ctx, azcli.PollClient()) From 6dee4d2d323485768c461cab6a98a5a858c249a7 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 3 Jun 2019 23:01:53 +0000 Subject: [PATCH 26/55] Wait for detach optimization --- builder/azure/chroot/diskattacher.go | 10 +++++++--- builder/azure/chroot/step_create_new_disk.go | 7 +++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go index 97ba4174c..c52e294eb 100644 --- a/builder/azure/chroot/diskattacher.go +++ b/builder/azure/chroot/diskattacher.go @@ -20,9 +20,10 @@ import ( type DiskAttacher interface { AttachDisk(ctx context.Context, disk string) (lun int32, err error) - DetachDisk(ctx context.Context, disk string) (err error) - WaitForDevice(ctx context.Context, i int32) (device string, err error) DiskPathForLun(lun int32) string + WaitForDevice(ctx context.Context, i int32) (device string, err error) + DetachDisk(ctx context.Context, disk string) (err error) + WaitForDetach(ctx context.Context, diskID string) error } func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher { @@ -89,7 +90,10 @@ func (da *diskAttacher) DetachDisk(ctx context.Context, diskID string) 
error { return err } - // waiting for VM update to finish takes way to long + return nil +} + +func (da *diskAttacher) WaitForDetach(ctx context.Context, diskID string) error { for { // loop until disk is not attached, timeout or error list, err := da.getDisks(ctx) if err != nil { diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index 6873c827a..8d334a76a 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -83,9 +83,12 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) { azcli := state.Get("azureclient").(client.AzureClientSet) ui := state.Get("ui").(packer.Ui) - diskResourceID := state.Get("os_disk_resource_id") + diskResourceID := state.Get("os_disk_resource_id").(string) - ui.Say(fmt.Sprintf("Deleting disk '%s'", diskResourceID)) + ui.Say(fmt.Sprintf("Waiting for disk %q detach to complete", diskResourceID)) + err := NewDiskAttacher(azcli).WaitForDetach(context.Background(), diskResourceID) + + ui.Say(fmt.Sprintf("Deleting disk %q", diskResourceID)) f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName) if err == nil { From 57cff8961a1b565e2f26a111281a92d8ecba6a95 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 3 Jun 2019 23:06:19 +0000 Subject: [PATCH 27/55] Allow skipping OSDisk cleanup --- builder/azure/chroot/builder.go | 3 ++ builder/azure/chroot/step_create_new_disk.go | 30 +++++++++++--------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index abc75d677..398b2230e 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -43,6 +43,7 @@ type Config struct { OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"` OSDiskCacheType string 
`mapstructure:"os_disk_cache_type"` + OSDiskSkipCleanup bool `mapstructure:"os_disk_skip_cleanup"` ImageResourceID string `mapstructure:"image_resource_id"` ImageOSState string `mapstructure:"image_os_state"` @@ -320,6 +321,8 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack HyperVGeneration: b.config.ImageHyperVGeneration, Location: info.Location, PlatformImage: pi, + + SkipCleanup: b.config.OSDiskSkipCleanup, }) } else { panic("Unknown image source: " + b.config.Source) diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index 8d334a76a..d8df8b6b2 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -21,6 +21,8 @@ type StepCreateNewDisk struct { HyperVGeneration string Location string PlatformImage *client.PlatformImage + + SkipCleanup bool } func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { @@ -81,21 +83,23 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu } func (s StepCreateNewDisk) Cleanup(state multistep.StateBag) { - azcli := state.Get("azureclient").(client.AzureClientSet) - ui := state.Get("ui").(packer.Ui) - diskResourceID := state.Get("os_disk_resource_id").(string) + if !s.SkipCleanup { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + diskResourceID := state.Get("os_disk_resource_id").(string) - ui.Say(fmt.Sprintf("Waiting for disk %q detach to complete", diskResourceID)) - err := NewDiskAttacher(azcli).WaitForDetach(context.Background(), diskResourceID) + ui.Say(fmt.Sprintf("Waiting for disk %q detach to complete", diskResourceID)) + err := NewDiskAttacher(azcli).WaitForDetach(context.Background(), diskResourceID) - ui.Say(fmt.Sprintf("Deleting disk %q", diskResourceID)) + ui.Say(fmt.Sprintf("Deleting disk %q", diskResourceID)) - f, err := 
azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName) - if err == nil { - err = f.WaitForCompletionRef(context.TODO(), azcli.PollClient()) - } - if err != nil { - log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err) - ui.Error(fmt.Sprintf("error deleting new disk '%s': %v.", diskResourceID, err)) + f, err := azcli.DisksClient().Delete(context.TODO(), s.ResourceGroup, s.DiskName) + if err == nil { + err = f.WaitForCompletionRef(context.TODO(), azcli.PollClient()) + } + if err != nil { + log.Printf("StepCreateNewDisk.Cleanup: error: %+v", err) + ui.Error(fmt.Sprintf("error deleting new disk '%s': %v.", diskResourceID, err)) + } } } From eff3f2bdcfe694ea64a80f71c7b64e22abb97858 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 15 Aug 2019 20:05:36 +0000 Subject: [PATCH 28/55] Add test for disk input validation --- builder/azure/chroot/builder_test.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 builder/azure/chroot/builder_test.go diff --git a/builder/azure/chroot/builder_test.go b/builder/azure/chroot/builder_test.go new file mode 100644 index 000000000..707cdf196 --- /dev/null +++ b/builder/azure/chroot/builder_test.go @@ -0,0 +1,28 @@ +package chroot + +import ( + "regexp" + "testing" + + "github.com/hashicorp/packer/packer" +) + +func TestBuilder_Prepare_DiskAsInput(t *testing.T) { + b := Builder{} + _, err := b.Prepare(map[string]interface{}{ + "source": "/subscriptions/28279221-ccbe-40f0-b70b-4d78ab822e09/resourceGroups/testrg/providers/Microsoft.Compute/disks/diskname", + }) + + if err != nil { + // make sure there is no error about the source field + errs, ok := err.(*packer.MultiError) + if !ok { + t.Error("Expected the returned error to be of type packer.MultiError") + } + for _, err := range errs.Errors { + if matched, _:=regexp.MatchString(`(^|\W)source\W`,err.Error()); matched { + t.Errorf("Did not expect an error about the 'source' field, but found %q", err) + } + } + } +} From 
27a5bfe11c7ff4c7a78e1f2cbe60c45eed8643a9 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 10 Sep 2019 12:48:55 +0000 Subject: [PATCH 29/55] Add implementation for disk as source --- builder/azure/chroot/builder.go | 61 ++++++++++++---- builder/azure/chroot/step_create_new_disk.go | 12 +++- .../azure/chroot/step_create_new_disk_test.go | 69 +++++++++++++++++++ .../common/client/azure_client_set_mock.go | 46 +++++++++++++ 4 files changed, 171 insertions(+), 17 deletions(-) create mode 100644 builder/azure/chroot/step_create_new_disk_test.go create mode 100644 builder/azure/common/client/azure_client_set_mock.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 398b2230e..4b07916a6 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -4,8 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" "log" "runtime" "strings" @@ -20,6 +18,8 @@ import ( "github.com/hashicorp/packer/template/interpolate" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" ) type Config struct { @@ -29,6 +29,7 @@ type Config struct { FromScratch bool `mapstructure:"from_scratch"` Source string `mapstructure:"source"` + sourceType sourceType CommandWrapper string `mapstructure:"command_wrapper"` PreMountCommands []string `mapstructure:"pre_mount_commands"` @@ -52,6 +53,13 @@ type Config struct { ctx interpolate.Context } +type sourceType string + +const ( + sourcePlatformImage sourceType = "PlatformImage" + sourceDisk sourceType = "Disk" +) + func (c *Config) GetContext() interpolate.Context { return c.ctx } @@ -163,9 +171,14 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } else { if _, err := client.ParsePlatformImageURN(b.config.Source); err == nil { log.Println("Source is platform image:", b.config.Source) + 
b.config.sourceType = sourcePlatformImage + } else if id, err := azure.ParseResourceID(b.config.Source); err == nil && + strings.EqualFold(id.Provider, "Microsoft.Compute") && strings.EqualFold(id.ResourceType, "disks") { + log.Println("Source is a disk resource ID:", b.config.Source) + b.config.sourceType = sourceDisk } else { errs = packer.MultiErrorAppend( - errs, fmt.Errorf("source: %q is not a valid platform image specifier", b.config.Source)) + errs, fmt.Errorf("source: %q is not a valid platform image specifier, nor is it a disk resource ID", b.config.Source)) } } @@ -301,16 +314,36 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack Location: info.Location, }) } else { - if pi, err := client.ParsePlatformImageURN(b.config.Source); err == nil { - if strings.EqualFold(pi.Version, "latest") { + switch b.config.sourceType { + case sourcePlatformImage: - vmi, err := azcli.VirtualMachineImagesClient().GetLatest(ctx, pi.Publisher, pi.Offer, pi.Sku, info.Location) - if err != nil { - return nil, fmt.Errorf("error retieving latest version of %q: %v", b.config.Source, err) + if pi, err := client.ParsePlatformImageURN(b.config.Source); err == nil { + if strings.EqualFold(pi.Version, "latest") { + + vmi, err := azcli.VirtualMachineImagesClient().GetLatest(ctx, pi.Publisher, pi.Offer, pi.Sku, info.Location) + if err != nil { + return nil, fmt.Errorf("error retieving latest version of %q: %v", b.config.Source, err) + } + pi.Version = to.String(vmi.Name) + log.Println("Resolved latest version of source image:", pi.Version) } - pi.Version = to.String(vmi.Name) - log.Println("Resolved latest version of source image:", pi.Version) + steps = append(steps, + &StepCreateNewDisk{ + SubscriptionID: info.SubscriptionID, + ResourceGroup: info.ResourceGroupName, + DiskName: b.config.TemporaryOSDiskName, + DiskSizeGB: b.config.OSDiskSizeGB, + DiskStorageAccountType: b.config.OSDiskStorageAccountType, + HyperVGeneration: b.config.ImageHyperVGeneration, 
+ Location: info.Location, + PlatformImage: pi, + + SkipCleanup: b.config.OSDiskSkipCleanup, + }) + } else { + panic("Unknown image source: " + b.config.Source) } + case sourceDisk: steps = append(steps, &StepCreateNewDisk{ SubscriptionID: info.SubscriptionID, @@ -319,13 +352,13 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack DiskSizeGB: b.config.OSDiskSizeGB, DiskStorageAccountType: b.config.OSDiskStorageAccountType, HyperVGeneration: b.config.ImageHyperVGeneration, - Location: info.Location, - PlatformImage: pi, + SourceDiskResourceID: b.config.Source, + //todo(paulmey) validate that source disk is in same location as VM SkipCleanup: b.config.OSDiskSkipCleanup, }) - } else { - panic("Unknown image source: " + b.config.Source) + default: + panic(fmt.Errorf("Unknown source type: %+q", b.config.sourceType)) } } diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index d8df8b6b2..ed5844c4e 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -19,8 +19,11 @@ type StepCreateNewDisk struct { DiskSizeGB int32 // optional, ignored if 0 DiskStorageAccountType string // from compute.DiskStorageAccountTypes HyperVGeneration string - Location string - PlatformImage *client.PlatformImage + + Location string + PlatformImage *client.PlatformImage + + SourceDiskResourceID string SkipCleanup bool } @@ -55,7 +58,10 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu disk.DiskProperties.DiskSizeGB = to.Int32Ptr(s.DiskSizeGB) } - if s.PlatformImage == nil { + if s.SourceDiskResourceID != "" { + disk.CreationData.CreateOption = compute.Copy + disk.CreationData.SourceResourceID = to.StringPtr(s.SourceDiskResourceID) + } else if s.PlatformImage == nil { disk.CreationData.CreateOption = compute.Empty } else { disk.CreationData.CreateOption = compute.FromImage diff --git 
a/builder/azure/chroot/step_create_new_disk_test.go b/builder/azure/chroot/step_create_new_disk_test.go new file mode 100644 index 000000000..f0488ade7 --- /dev/null +++ b/builder/azure/chroot/step_create_new_disk_test.go @@ -0,0 +1,69 @@ +package chroot + +import ( + "context" + "io/ioutil" + "net/http" + "regexp" + "testing" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +func Test_StepCreateNewDisk_FromDisk(t *testing.T) { + sut := StepCreateNewDisk{ + SubscriptionID: "SubscriptionID", + ResourceGroup: "ResourceGroupName", + DiskName: "TemporaryOSDiskName", + DiskSizeGB: 42, + DiskStorageAccountType: string(compute.PremiumLRS), + HyperVGeneration: string(compute.V1), + Location: "westus", + SourceDiskResourceID: "SourceDisk", + } + + expected := regexp.MustCompile(`[\s\n]`).ReplaceAllString(` +{ + "location": "westus", + "properties": { + "osType": "Linux", + "hyperVGeneration": "V1", + "creationData": { + "createOption": "Copy", + "sourceResourceId": "SourceDisk" + }, + "diskSizeGB": 42 + }, + "sku": { + "name": "Premium_LRS" + } +}`, "") + + m := compute.NewDisksClient("subscriptionId") + m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + b, _ := ioutil.ReadAll(r.Body) + if string(b) != expected { + t.Fatalf("expected body to be %q, but got %q", expected, string(b)) + } + return &http.Response{ + Request: r, + StatusCode: 200, + }, nil + }) + + state := new(multistep.BasicStateBag) + state.Put("azureclient", &client.AzureClientSetMock{ + DisksClientMock: m, + }) + state.Put("ui", packer.TestUi(t)) + + r := sut.Run(context.TODO(), state) + + if r != multistep.ActionContinue { + t.Fatal("Run failed") + } +} diff --git a/builder/azure/common/client/azure_client_set_mock.go 
b/builder/azure/common/client/azure_client_set_mock.go new file mode 100644 index 000000000..9f4a34b1c --- /dev/null +++ b/builder/azure/common/client/azure_client_set_mock.go @@ -0,0 +1,46 @@ +package client + +import ( + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi" + "github.com/Azure/go-autorest/autorest" +) + +// AzureClientSetMock provides a generic mock for AzureClientSet +type AzureClientSetMock struct { + DisksClientMock computeapi.DisksClientAPI + ImagesClientMock computeapi.ImagesClientAPI + VirtualMachineImagesClientMock VirtualMachineImagesClientAPI + VirtualMachinesClientMock computeapi.VirtualMachinesClientAPI + PollClientMock autorest.Client + MetadataClientMock MetadataClientAPI +} + +// DisksClient returns a DisksClientAPI +func (m *AzureClientSetMock) DisksClient() computeapi.DisksClientAPI { + return m.DisksClientMock +} + +// ImagesClient returns a ImagesClientAPI +func (m *AzureClientSetMock) ImagesClient() computeapi.ImagesClientAPI { + return m.ImagesClientMock +} + +// VirtualMachineImagesClient returns a VirtualMachineImagesClientAPI +func (m *AzureClientSetMock) VirtualMachineImagesClient() VirtualMachineImagesClientAPI { + return m.VirtualMachineImagesClientMock +} + +// VirtualMachinesClient returns a VirtualMachinesClientAPI +func (m *AzureClientSetMock) VirtualMachinesClient() computeapi.VirtualMachinesClientAPI { + return m.VirtualMachinesClientMock +} + +// PollClient returns an autorest Client that can be used for polling async requests +func (m *AzureClientSetMock) PollClient() autorest.Client { + return m.PollClientMock +} + +// MetadataClient returns a MetadataClientAPI +func (m *AzureClientSetMock) MetadataClient() MetadataClientAPI { + return m.MetadataClientMock +} From cb729e5a3872cdc86a42bad82a9f6597f6b2d42a Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 26 Sep 2019 18:44:35 +0000 Subject: [PATCH 30/55] Add documentation to config struct --- builder/azure/chroot/builder.go | 72 
+++++++++++++------ .../azure/chroot/step_create_new_disk_test.go | 2 +- .../azure/chroot/_Config-not-required.html.md | 50 +++++++++++++ .../azure/chroot/_Config-required.html.md | 6 ++ .../builder/azure/chroot/_Config.html.md | 3 + 5 files changed, 109 insertions(+), 24 deletions(-) create mode 100644 website/source/partials/builder/azure/chroot/_Config-not-required.html.md create mode 100644 website/source/partials/builder/azure/chroot/_Config-required.html.md create mode 100644 website/source/partials/builder/azure/chroot/_Config.html.md diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 4b07916a6..f2f14f407 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -1,3 +1,9 @@ +//go:generate struct-markdown + +// Package chroot is able to create an Azure manage image without requiring the +// launch of a new instance for every build. It does this by attaching and +// mounting the root disk and chrooting into that directory. +// It then creates a managed image from that attached disk. package chroot import ( @@ -22,32 +28,68 @@ import ( "github.com/Azure/go-autorest/autorest/to" ) +// Config is the configuration that is chained through the steps and settable +// from the template. type Config struct { common.PackerConfig `mapstructure:",squash"` ClientConfig client.Config `mapstructure:",squash"` + // When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`. FromScratch bool `mapstructure:"from_scratch"` - Source string `mapstructure:"source"` + // Either a managed disk resourced ID or a publisher:offer:sku:version specifier for plaform image sources. + Source string `mapstructure:"source" required:"true"` sourceType sourceType + // How to run shell commands. This may be useful to set environment variables or perhaps run + // a command with sudo or so on. This is a configuration template where the `.Command` variable + // is replaced with the command to be run. 
Defaults to `{{.Command}}`. CommandWrapper string `mapstructure:"command_wrapper"` + // A series of commands to execute after attaching the root volume and before mounting the chroot. + // This is not required unless using `from_scratch`. If so, this should include any partitioning + // and filesystem creation commands. The path to the device is provided by `{{.Device}}`. PreMountCommands []string `mapstructure:"pre_mount_commands"` + // Options to supply the `mount` command when mounting devices. Each option will be prefixed with + // `-o` and supplied to the `mount` command ran by Packer. Because this command is ran in a shell, + // user discretion is advised. See this manual page for the `mount` command for valid file system specific options. MountOptions []string `mapstructure:"mount_options"` + // The partition number containing the / partition. By default this is the first partition of the volume. MountPartition string `mapstructure:"mount_partition"` + // The path where the volume will be mounted. This is where the chroot environment will be. This defaults + // to `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template where the `.Device` + // variable is replaced with the name of the device where the volume is attached. MountPath string `mapstructure:"mount_path"` + // As `pre_mount_commands`, but the commands are executed after mounting the root device and before the + // extra mount and copy steps. The device and mount path are provided by `{{.Device}}` and `{{.MountPath}}`. PostMountCommands []string `mapstructure:"post_mount_commands"` + // This is a list of devices to mount into the chroot environment. This configuration parameter requires + // some additional documentation which is in the "Chroot Mounts" section below. Please read that section + // for more information on how to use this. 
ChrootMounts [][]string `mapstructure:"chroot_mounts"` + // Paths to files on the running Azure instance that will be copied into the chroot environment prior to + // provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass an empty list to skip copying + // `/etc/resolv.conf`. You may need to do this if you're building an image that uses systemd. CopyFiles []string `mapstructure:"copy_files"` + // The name of the temporary disk that will be created in the resource group of the VM that Packer is + // running on. Will be generated if not set. TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"` + // Try to resize the OS disk to this size on the first copy. Disks can only be englarged. If not specified, + // the disk will keep its original size. Required when using `from_scratch` OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` + // The [storage SKU](https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes) + // to use for the OS Disk. Defaults to `Standard_LRS`. OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"` + // The [cache type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#cachingtypes) + // specified in the resulting image and for attaching it to the Packer VM. Defaults to `ReadOnly` OSDiskCacheType string `mapstructure:"os_disk_cache_type"` + // If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false` OSDiskSkipCleanup bool `mapstructure:"os_disk_skip_cleanup"` - ImageResourceID string `mapstructure:"image_resource_id"` - ImageOSState string `mapstructure:"image_os_state"` + // The image to create using this build. + ImageResourceID string `mapstructure:"image_resource_id" required:"true"` + // The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes). + // Defaults to `V2`. 
ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"` ctx interpolate.Context @@ -60,6 +102,8 @@ const ( sourceDisk sourceType = "Disk" ) +// GetContext implements ContextProvider to allow steps to use the config context +// for template interpolation func (c *Config) GetContext() interpolate.Context { return c.ctx } @@ -145,12 +189,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.OSDiskCacheType = string(compute.CachingTypesReadOnly) } - if b.config.ImageOSState == "" { - b.config.ImageOSState = string(compute.Generalized) - } - if b.config.ImageHyperVGeneration == "" { - b.config.ImageHyperVGeneration = string(compute.V1) + b.config.ImageHyperVGeneration = string(compute.V2) } // checks, accumulate any errors or warnings @@ -202,10 +242,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } } - if err := checkOSState(b.config.ImageOSState); err != nil { - errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_os_state: %v", err)) - } - if err := checkHyperVGeneration(b.config.ImageHyperVGeneration); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("image_hyperv_generation: %v", err)) } @@ -218,16 +254,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { return warns, nil } -func checkOSState(s string) interface{} { - for _, v := range compute.PossibleOperatingSystemStateTypesValues() { - if compute.OperatingSystemStateTypes(s) == v { - return nil - } - } - return fmt.Errorf("%q is not a valid value %v", - s, compute.PossibleOperatingSystemStateTypesValues()) -} - func checkDiskCacheType(s string) interface{} { for _, v := range compute.PossibleCachingTypesValues() { if compute.CachingTypes(s) == v { @@ -385,7 +411,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack &chroot.StepEarlyCleanup{}, &StepCreateImage{ ImageResourceID: b.config.ImageResourceID, - ImageOSState: b.config.ImageOSState, + ImageOSState: 
string(compute.Generalized), OSDiskCacheType: b.config.OSDiskCacheType, OSDiskStorageAccountType: b.config.OSDiskStorageAccountType, Location: info.Location, diff --git a/builder/azure/chroot/step_create_new_disk_test.go b/builder/azure/chroot/step_create_new_disk_test.go index f0488ade7..dc95a8b8e 100644 --- a/builder/azure/chroot/step_create_new_disk_test.go +++ b/builder/azure/chroot/step_create_new_disk_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" - "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" "github.com/hashicorp/packer/builder/azure/common/client" "github.com/hashicorp/packer/helper/multistep" "github.com/hashicorp/packer/packer" diff --git a/website/source/partials/builder/azure/chroot/_Config-not-required.html.md b/website/source/partials/builder/azure/chroot/_Config-not-required.html.md new file mode 100644 index 000000000..8f8556919 --- /dev/null +++ b/website/source/partials/builder/azure/chroot/_Config-not-required.html.md @@ -0,0 +1,50 @@ + + +- `from_scratch` (bool) - When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`. + +- `command_wrapper` (string) - How to run shell commands. This may be useful to set environment variables or perhaps run + a command with sudo or so on. This is a configuration template where the `.Command` variable + is replaced with the command to be run. Defaults to `{{.Command}}`. + +- `pre_mount_commands` ([]string) - A series of commands to execute after attaching the root volume and before mounting the chroot. + This is not required unless using `from_scratch`. If so, this should include any partitioning + and filesystem creation commands. The path to the device is provided by `{{.Device}}`. + +- `mount_options` ([]string) - Options to supply the `mount` command when mounting devices. Each option will be prefixed with + `-o` and supplied to the `mount` command ran by Packer. 
Because this command is ran in a shell, + user discretion is advised. See this manual page for the `mount` command for valid file system specific options. + +- `mount_partition` (string) - The partition number containing the / partition. By default this is the first partition of the volume. + +- `mount_path` (string) - The path where the volume will be mounted. This is where the chroot environment will be. This defaults + to `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template where the `.Device` + variable is replaced with the name of the device where the volume is attached. + +- `post_mount_commands` ([]string) - As `pre_mount_commands`, but the commands are executed after mounting the root device and before the + extra mount and copy steps. The device and mount path are provided by `{{.Device}}` and `{{.MountPath}}`. + +- `chroot_mounts` ([][]string) - This is a list of devices to mount into the chroot environment. This configuration parameter requires + some additional documentation which is in the "Chroot Mounts" section below. Please read that section + for more information on how to use this. + +- `copy_files` ([]string) - Paths to files on the running Azure instance that will be copied into the chroot environment prior to + provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass an empty list to skip copying + `/etc/resolv.conf`. You may need to do this if you're building an image that uses systemd. + +- `temporary_os_disk_name` (string) - The name of the temporary disk that will be created in the resource group of the VM that Packer is + running on. Will be generated if not set. + +- `os_disk_size_gb` (int32) - Try to resize the OS disk to this size on the first copy. Disks can only be englarged. If not specified, + the disk will keep its original size. 
Required when using `from_scratch` + +- `os_disk_storage_account_type` (string) - The [storage SKU](https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes) + to use for the OS Disk. Defaults to `Standard_LRS`. + +- `os_disk_cache_type` (string) - The [cache type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#cachingtypes) + specified in the resulting image and for attaching it to the Packer VM. Defaults to `ReadOnly` + +- `os_disk_skip_cleanup` (bool) - If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false` + +- `image_hyperv_generation` (string) - The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes). + Defaults to `V2`. + \ No newline at end of file diff --git a/website/source/partials/builder/azure/chroot/_Config-required.html.md b/website/source/partials/builder/azure/chroot/_Config-required.html.md new file mode 100644 index 000000000..e24734560 --- /dev/null +++ b/website/source/partials/builder/azure/chroot/_Config-required.html.md @@ -0,0 +1,6 @@ + + +- `source` (string) - Either a managed disk resourced ID or a publisher:offer:sku:version specifier for plaform image sources. + +- `image_resource_id` (string) - The image to create using this build. + \ No newline at end of file diff --git a/website/source/partials/builder/azure/chroot/_Config.html.md b/website/source/partials/builder/azure/chroot/_Config.html.md new file mode 100644 index 000000000..147c5b9c7 --- /dev/null +++ b/website/source/partials/builder/azure/chroot/_Config.html.md @@ -0,0 +1,3 @@ + +Config is the configuration that is chained through the steps and settable +from the template. 
From 11ef06b94d853bcefeb0e48e79178741c6dc3ab2 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 26 Sep 2019 22:17:07 +0000 Subject: [PATCH 31/55] Add StepVerifySourceDisk --- builder/azure/chroot/builder.go | 54 ++++--- .../azure/chroot/step_verify_source_disk.go | 65 ++++++++ .../chroot/step_verify_source_disk_test.go | 146 ++++++++++++++++++ 3 files changed, 241 insertions(+), 24 deletions(-) create mode 100644 builder/azure/chroot/step_verify_source_disk.go create mode 100644 builder/azure/chroot/step_verify_source_disk_test.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index f2f14f407..c476b2fd1 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -36,58 +36,58 @@ type Config struct { ClientConfig client.Config `mapstructure:",squash"` // When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`. - FromScratch bool `mapstructure:"from_scratch"` + FromScratch bool `mapstructure:"from_scratch"` // Either a managed disk resourced ID or a publisher:offer:sku:version specifier for plaform image sources. - Source string `mapstructure:"source" required:"true"` - sourceType sourceType + Source string `mapstructure:"source" required:"true"` + sourceType sourceType // How to run shell commands. This may be useful to set environment variables or perhaps run // a command with sudo or so on. This is a configuration template where the `.Command` variable // is replaced with the command to be run. Defaults to `{{.Command}}`. - CommandWrapper string `mapstructure:"command_wrapper"` - // A series of commands to execute after attaching the root volume and before mounting the chroot. - // This is not required unless using `from_scratch`. If so, this should include any partitioning + CommandWrapper string `mapstructure:"command_wrapper"` + // A series of commands to execute after attaching the root volume and before mounting the chroot. 
+ // This is not required unless using `from_scratch`. If so, this should include any partitioning // and filesystem creation commands. The path to the device is provided by `{{.Device}}`. - PreMountCommands []string `mapstructure:"pre_mount_commands"` - // Options to supply the `mount` command when mounting devices. Each option will be prefixed with + PreMountCommands []string `mapstructure:"pre_mount_commands"` + // Options to supply the `mount` command when mounting devices. Each option will be prefixed with // `-o` and supplied to the `mount` command ran by Packer. Because this command is ran in a shell, // user discretion is advised. See this manual page for the `mount` command for valid file system specific options. - MountOptions []string `mapstructure:"mount_options"` + MountOptions []string `mapstructure:"mount_options"` // The partition number containing the / partition. By default this is the first partition of the volume. - MountPartition string `mapstructure:"mount_partition"` + MountPartition string `mapstructure:"mount_partition"` // The path where the volume will be mounted. This is where the chroot environment will be. This defaults - // to `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template where the `.Device` + // to `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template where the `.Device` // variable is replaced with the name of the device where the volume is attached. - MountPath string `mapstructure:"mount_path"` + MountPath string `mapstructure:"mount_path"` // As `pre_mount_commands`, but the commands are executed after mounting the root device and before the // extra mount and copy steps. The device and mount path are provided by `{{.Device}}` and `{{.MountPath}}`. - PostMountCommands []string `mapstructure:"post_mount_commands"` - // This is a list of devices to mount into the chroot environment. 
This configuration parameter requires + PostMountCommands []string `mapstructure:"post_mount_commands"` + // This is a list of devices to mount into the chroot environment. This configuration parameter requires // some additional documentation which is in the "Chroot Mounts" section below. Please read that section // for more information on how to use this. - ChrootMounts [][]string `mapstructure:"chroot_mounts"` - // Paths to files on the running Azure instance that will be copied into the chroot environment prior to + ChrootMounts [][]string `mapstructure:"chroot_mounts"` + // Paths to files on the running Azure instance that will be copied into the chroot environment prior to // provisioning. Defaults to `/etc/resolv.conf` so that DNS lookups work. Pass an empty list to skip copying // `/etc/resolv.conf`. You may need to do this if you're building an image that uses systemd. - CopyFiles []string `mapstructure:"copy_files"` + CopyFiles []string `mapstructure:"copy_files"` // The name of the temporary disk that will be created in the resource group of the VM that Packer is // running on. Will be generated if not set. - TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"` + TemporaryOSDiskName string `mapstructure:"temporary_os_disk_name"` // Try to resize the OS disk to this size on the first copy. Disks can only be englarged. If not specified, // the disk will keep its original size. Required when using `from_scratch` - OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` + OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb"` // The [storage SKU](https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes) // to use for the OS Disk. Defaults to `Standard_LRS`. OSDiskStorageAccountType string `mapstructure:"os_disk_storage_account_type"` // The [cache type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#cachingtypes) // specified in the resulting image and for attaching it to the Packer VM. 
Defaults to `ReadOnly` - OSDiskCacheType string `mapstructure:"os_disk_cache_type"` + OSDiskCacheType string `mapstructure:"os_disk_cache_type"` // If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false` - OSDiskSkipCleanup bool `mapstructure:"os_disk_skip_cleanup"` + OSDiskSkipCleanup bool `mapstructure:"os_disk_skip_cleanup"` // The image to create using this build. - ImageResourceID string `mapstructure:"image_resource_id" required:"true"` + ImageResourceID string `mapstructure:"image_resource_id" required:"true"` // The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes). // Defaults to `V2`. ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"` @@ -102,7 +102,7 @@ const ( sourceDisk sourceType = "Disk" ) -// GetContext implements ContextProvider to allow steps to use the config context +// GetContext implements ContextProvider to allow steps to use the config context // for template interpolation func (c *Config) GetContext() interpolate.Context { return c.ctx @@ -354,6 +354,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack log.Println("Resolved latest version of source image:", pi.Version) } steps = append(steps, + &StepCreateNewDisk{ SubscriptionID: info.SubscriptionID, ResourceGroup: info.ResourceGroupName, @@ -371,6 +372,11 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack } case sourceDisk: steps = append(steps, + &StepVerifySourceDisk{ + SourceDiskResourceID: b.config.Source, + SubscriptionID: info.SubscriptionID, + Location: info.Location, + }, &StepCreateNewDisk{ SubscriptionID: info.SubscriptionID, ResourceGroup: info.ResourceGroupName, @@ -379,7 +385,7 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack DiskStorageAccountType: b.config.OSDiskStorageAccountType, HyperVGeneration: b.config.ImageHyperVGeneration, 
SourceDiskResourceID: b.config.Source, - //todo(paulmey) validate that source disk is in same location as VM + Location: info.Location, SkipCleanup: b.config.OSDiskSkipCleanup, }) diff --git a/builder/azure/chroot/step_verify_source_disk.go b/builder/azure/chroot/step_verify_source_disk.go new file mode 100644 index 000000000..b949447cb --- /dev/null +++ b/builder/azure/chroot/step_verify_source_disk.go @@ -0,0 +1,65 @@ +package chroot + +import ( + "context" + "fmt" + "strings" + + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type StepVerifySourceDisk struct { + SubscriptionID string + SourceDiskResourceID string + Location string +} + +func (s StepVerifySourceDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + azcli := state.Get("azureclient").(client.AzureClientSet) + ui := state.Get("ui").(packer.Ui) + + ui.Say("Checking source disk location") + resource, err := azure.ParseResourceID(s.SourceDiskResourceID) + if err != nil { + ui.Error(fmt.Sprintf("Could not parse resource id %q: %s", s.SourceDiskResourceID, err)) + return multistep.ActionHalt + } + + if !strings.EqualFold(resource.SubscriptionID, s.SubscriptionID) { + ui.Error(fmt.Sprintf("Source disk resource %q is in a different subscription than this VM (%q). 
"+ + "Packer does not know how to handle that.", + s.SourceDiskResourceID, s.SubscriptionID)) + return multistep.ActionHalt + } + + if !(strings.EqualFold(resource.Provider, "Microsoft.Compute") && strings.EqualFold(resource.ResourceType, "disks")) { + ui.Error(fmt.Sprintf("Resource ID %q is not a managed disk resource", s.SourceDiskResourceID)) + return multistep.ActionHalt + } + + disk, err := azcli.DisksClient().Get(ctx, + resource.ResourceGroup, resource.ResourceName) + if err != nil { + ui.Error(fmt.Sprintf("Unable to retrieve disk (%q): %s", s.SourceDiskResourceID, err)) + return multistep.ActionHalt + } + + location := to.String(disk.Location) + if !strings.EqualFold(location, s.Location) { + ui.Error(fmt.Sprintf("Source disk resource %q is in a different location (%q) than this VM (%q). "+ + "Packer does not know how to handle that.", + s.SourceDiskResourceID, + location, + s.Location)) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s StepVerifySourceDisk) Cleanup(state multistep.StateBag) {} diff --git a/builder/azure/chroot/step_verify_source_disk_test.go b/builder/azure/chroot/step_verify_source_disk_test.go new file mode 100644 index 000000000..113fd3f73 --- /dev/null +++ b/builder/azure/chroot/step_verify_source_disk_test.go @@ -0,0 +1,146 @@ +package chroot + +import ( + "context" + "io/ioutil" + "net/http" + "reflect" + "regexp" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +func Test_StepVerifySourceDisk_Run(t *testing.T) { + type fields struct { + SubscriptionID string + SourceDiskResourceID string + Location string + + GetDiskResponseCode int + GetDiskResponseBody string + } + type args struct { + state multistep.StateBag + } + tests := []struct { + name string + 
fields fields + args args + want multistep.StepAction + errormatch string + }{ + { + name: "HappyPath", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1", + Location: "westus2", + + GetDiskResponseCode: 200, + GetDiskResponseBody: `{"location":"westus2"}`, + }, + want: multistep.ActionContinue, + }, + { + name: "DiskNotFound", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1", + Location: "westus2", + + GetDiskResponseCode: 404, + GetDiskResponseBody: `{}`, + }, + want: multistep.ActionHalt, + errormatch: "Unable to retrieve", + }, + { + name: "NotADisk", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/images/image1", + Location: "westus2", + + GetDiskResponseCode: 404, + }, + want: multistep.ActionHalt, + errormatch: "not a managed disk", + }, + { + name: "OtherSubscription", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/subscriptions/subid2/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1", + Location: "westus2", + + GetDiskResponseCode: 200, + GetDiskResponseBody: `{"location":"westus2"}`, + }, + want: multistep.ActionHalt, + errormatch: "different subscription", + }, + { + name: "OtherLocation", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/disks/disk1", + Location: "eastus", + + GetDiskResponseCode: 200, + GetDiskResponseBody: `{"location":"westus2"}`, + }, + want: multistep.ActionHalt, + errormatch: "different location", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := StepVerifySourceDisk{ + SubscriptionID: tt.fields.SubscriptionID, + SourceDiskResourceID: tt.fields.SourceDiskResourceID, + Location: 
tt.fields.Location, + } + + m := compute.NewDisksClient("subscriptionId") + m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return &http.Response{ + Request: r, + Body: ioutil.NopCloser(strings.NewReader(tt.fields.GetDiskResponseBody)), + StatusCode: tt.fields.GetDiskResponseCode, + }, nil + }) + errorBuffer := &strings.Builder{} + ui := &packer.BasicUi{ + Reader: strings.NewReader(""), + Writer: ioutil.Discard, + ErrorWriter: errorBuffer, + } + + state := new(multistep.BasicStateBag) + state.Put("azureclient", &client.AzureClientSetMock{ + DisksClientMock: m, + }) + state.Put("ui", ui) + + if got := s.Run(context.TODO(), state); !reflect.DeepEqual(got, tt.want) { + t.Errorf("StepVerifySourceDisk.Run() = %v, want %v", got, tt.want) + } + if tt.errormatch != "" { + if !regexp.MustCompile(tt.errormatch).MatchString(errorBuffer.String()) { + t.Errorf("Expected the error output (%q) to match %q", errorBuffer.String(), tt.errormatch) + } + } + }) + } +} + +type uiThatRemebersErrors struct { + packer.Ui + LastError string +} From 066ae0aa0744129a761b4e208ea1e652d62f1b2f Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 22:13:04 +0000 Subject: [PATCH 32/55] Documentation update --- ...zure.html.md.erb => azure-arm.html.md.erb} | 10 +- .../docs/builders/azure-chroot.html.md.erb | 101 ++++++++ .../source/docs/builders/azure-setup.html.md | 236 ------------------ website/source/docs/builders/azure.html.md | 101 ++++++++ website/source/layouts/docs.erb | 7 +- 5 files changed, 212 insertions(+), 243 deletions(-) rename website/source/docs/builders/{azure.html.md.erb => azure-arm.html.md.erb} (98%) create mode 100644 website/source/docs/builders/azure-chroot.html.md.erb delete mode 100644 website/source/docs/builders/azure-setup.html.md create mode 100644 website/source/docs/builders/azure.html.md diff --git a/website/source/docs/builders/azure.html.md.erb b/website/source/docs/builders/azure-arm.html.md.erb similarity index 98% 
rename from website/source/docs/builders/azure.html.md.erb rename to website/source/docs/builders/azure-arm.html.md.erb index e2c83f80b..b08e8506e 100644 --- a/website/source/docs/builders/azure.html.md.erb +++ b/website/source/docs/builders/azure-arm.html.md.erb @@ -1,8 +1,8 @@ --- description: 'Packer supports building VHDs in Azure Resource manager.' layout: docs -page_title: 'Azure - Builders' -sidebar_current: 'docs-builders-azure' +page_title: 'Azure arm - Builders' +sidebar_current: 'docs-builders-azure-arm' --- # Azure Resource Manager Builder @@ -23,7 +23,7 @@ VM from your build artifact. Azure uses a combination of OAuth and Active Directory to authorize requests to the ARM API. Learn how to [authorize access to -ARM](/docs/builders/azure-setup.html). +ARM](/docs/builders/azure.html#authentication-for-azure). The documentation below references command output from the [Azure CLI](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/). @@ -36,12 +36,12 @@ addition to the options listed here, a builder. ### Required options for authentication: -If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure-setup.html#managed-identities-for-azure-resources) +If you're running packer on an Azure VM with a [managed identity](/docs/builders/azure.html#azure-managed-identity) you don't need to specify any additional configuration options. If you would like to use interactive user authentication, you should specify `subscription_id` only. Packer will use cached credentials or redirect you to a website to log in. -If you want to use a [service principal](/docs/builders/azure-setup.html#create-a-service-principal) +If you want to use a [service principal](/docs/builders/azure.html#azure-active-directory-service-principal) you should specify `subscription_id`, `client_id` and one of `client_secret`, `client_cert_path` or `client_jwt`. 
diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb new file mode 100644 index 000000000..6a67888d9 --- /dev/null +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -0,0 +1,101 @@ +--- +description: | + The azure-chroot Packer builder is able to create Azure Managed Images leveraging + a VM in Azure. +layout: docs +page_title: 'Azure chroot - Builders' +sidebar_current: 'docs-builders-azure-chroot' +--- + +# Azure Builder (chroot) + +Type: `azure-chroot` + +The `azure-chroot` builder is able to build Azure managed disk (MD) images. For +more information on managed disks, see [Azure Managed Disks Overview](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/managed-disks-overview). + +The difference between this builder and the `azure-arm` builder is that this +builder is able to build a managed disk image without launching an Azure VM +instance. This can dramatically speed up image builds. It also allows for more +deterministic image content and enables some capabilities that are not possible +with the `azure-arm` builder. + +> **This is an advanced builder** If you're just getting started with Packer, +it is recommend to start with the [azure-arm builder](/docs/builders/azure-arm.html), +which is much easier to use. + +## How Does it Work? + +This builder works by creating a new MD from either an existing source or from +scratch and attaching it to the (already existing) Azure VM where Packer is +running. Once attached, a [chroot](https://en.wikipedia.org/wiki/Chroot) is set +up and made available to the provisioners. After provisioning, the MD is +detached, snapshotted and a MD image is created. + +Using this process, minutes can be shaved off the image creation process +because Packer does not need to launch a VM instance. + +There are some restrictions however: +* The host system must be a similar system (generally the same OS version, + kernel versions, etc.) 
as the image being built. +* If the source is a user image or managed disk, it must be made available in + the same region as the host system. ([name=Paul Meyer]: we could work around + this restriction by doing a cross-region copy, but that takes away the main + speed advantage.) +* The host system SKU has to allow for all of the specified disks to be + attached. + +## Configuration Reference + +There are many configuration options available for the builder. We'll start +withauthentication parameters, then go over the builder specific options. + +### Authentication options +The Azure builders [share this configuration](/docs/builders/azure.html). + +<%= partial "partials/builder/azure/common/client/_Config-not-required.html" %> + +### Builder specific options + +#### Required: +<%= partial "partials/builder/azure/chroot/_Config-required.html" %> + +#### Optional: +<%= partial "partials/builder/azure/chroot/_Config-not-required.html" %> + +## Chroot Mounts + +The `chroot_mounts` configuration can be used to mount specific devices within +the chroot. By default, the following additional mounts are added into the +chroot by Packer: + +- `/proc` (proc) +- `/sys` (sysfs) +- `/dev` (bind to real `/dev`) +- `/dev/pts` (devpts) +- `/proc/sys/fs/binfmt_misc` (binfmt\_misc) + +These default mounts are usually good enough for anyone and are sane defaults. +However, if you want to change or add the mount points, you may using the +`chroot_mounts` configuration. Here is an example configuration which only +mounts `/prod` and `/dev`: + +``` json +{ + "chroot_mounts": [ + ["proc", "proc", "/proc"], + ["bind", "/dev", "/dev"] + ] +} +``` + +`chroot_mounts` is a list of a 3-tuples of strings. The three components of the +3-tuple, in order, are: + +- The filesystem type. If this is "bind", then Packer will properly bind the + filesystem to another mount point. + +- The source device. + +- The mount directory. 
+ diff --git a/website/source/docs/builders/azure-setup.html.md b/website/source/docs/builders/azure-setup.html.md deleted file mode 100644 index ee73b1715..000000000 --- a/website/source/docs/builders/azure-setup.html.md +++ /dev/null @@ -1,236 +0,0 @@ ---- -description: | - In order to build VMs in Azure, Packer needs various configuration options. - These options and how to obtain them are documented on this page. -layout: docs -page_title: 'Setup - Azure - Builders' -sidebar_current: 'docs-builders-azure-setup' ---- - -# Authorizing Packer Builds in Azure - -In order to build VMs in Azure, Packer needs 6 configuration options to be -specified: - -- `subscription_id` - UUID identifying your Azure subscription (where billing - is handled) - -- `client_id` - UUID identifying the Active Directory service principal that - will run your Packer builds - -- `client_secret` - service principal secret / password - -- `resource_group_name` - name of the resource group where your VHD(s) will - be stored - -- `storage_account` - name of the storage account where your VHD(s) will be - stored - --> Behind the scenes, Packer uses the OAuth protocol to authenticate against -Azure Active Directory and authorize requests to the Azure Service Management -API. These topics are unnecessarily complicated, so we will try to ignore them -for the rest of this document.

You do not need to understand how -OAuth works in order to use Packer with Azure, though the Active Directory -terms "service principal" and "role" will be useful for understanding Azure's -access policies. - -In order to get all of the items above, you will need a username and password -for your Azure account. - -## Device Login - -Device login is an alternative way to authorize in Azure Packer. Device login -only requires you to know your Subscription ID. (Device login is only supported -for Linux based VMs.) Device login is intended for those who are first time -users, and just want to ''kick the tires.'' We recommend the SPN approach if -you intend to automate Packer. - -> Device login is for **interactive** builds, and SPN is for **automated** builds. - -There are three pieces of information you must provide to enable device login -mode: - -1. SubscriptionID -2. Resource Group - parent resource group that Packer uses to build an image. -3. Storage Account - storage account where the image will be placed. - -> Device login mode is enabled by not setting client\_id and client\_secret. - -> Device login mode is for the Public and US Gov clouds only. - -The device login flow asks that you open a web browser, navigate to -http://aka.ms/devicelogin, -and input the supplied code. This authorizes the Packer for Azure application -to act on your behalf. An OAuth token will be created, and stored in the user's -home directory (\~/.azure/packer/oauth-TenantID.json). This token is used if -the token file exists, and it is refreshed as necessary. The token file -prevents the need to continually execute the device login flow. Packer will ask -for two device login auth, one for service management endpoint and another for -accessing temp keyvault secrets that it creates. - -## Managed identities for Azure resources - --> Managed identities for Azure resources is the new name for the service -formerly known as Managed Service Identity (MSI). 
- -Managed identities is an alternative way to authorize in Azure Packer. Managed -identities for Azure resources are automatically managed by Azure and enable -you to authenticate to services that support Azure AD authentication without -needing to insert credentials into your buildfile. Navigate to -managed identities azure resources overview to learn more about -this feature. - -This feature will be used when no `subscription_id`, `client_id` or -`client_secret` is set in your buildfile. - -## Install the Azure CLI - -To get the credentials above, we will need to install the Azure CLI. Please -refer to Microsoft's official [installation -guide](https://azure.microsoft.com/en-us/documentation/articles/xplat-cli-install/). - -The guides below use [JMESPath](http://jmespath.org/) queries to select and reformat output from the AZ CLI commands. JMESPath is [part of the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/query-azure-cli?view=azure-cli-latest) and can be used in the same way as the `jq` tool. - -## Guided Setup - -The Packer project includes a [setup -script](https://github.com/hashicorp/packer/blob/master/contrib/azure-setup.sh) -that can help you setup your account. It uses an interactive bash script to log -you into Azure, name your resources, and export your Packer configuration. - -## Manual Setup - -If you want more control, or the script does not work for you, you can also use -the manual instructions below to setup your Azure account. You will need to -manually keep track of the various account identifiers, resource names, and -your service principal password. - -### Identify Your Tenant and Subscription IDs - -Login using the Azure CLI - -``` shell -$ az login -# Note, we have launched a browser for you to login. 
For old experience with device code, use "az login --use-device-code" -``` - -Once you've completed logging in, you should get a JSON array like the one -below: - -``` shell -[ - { - "cloudName": "AzureCloud", - "id": "$uuid", - "isDefault": false, - "name": "Pay-As-You-Go", - "state": "Enabled", - "tenantId": "$tenant_uuid", - "user": { - "name": "my_email@anywhere.com", - "type": "user" - } - } -] -``` - -Get your account information - -``` shell -$ az account list --output table --query '[].{Name:name,subscription_id:id}' -$ az account set --subscription ACCOUNTNAME -$ az account show --output json --query 'id' -``` - -This will print out one line that look like this: - - 4f562e88-8caf-421a-b4da-e3f6786c52ec - -This is your `subscription_id`. Note it for later. - -### Create a Resource Group - -A [resource -group](https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/#resource-groups) -is used to organize related resources. Resource groups and storage accounts are -tied to a location. To see available locations, run: - -``` shell -$ az account list-locations -$ LOCATION=xxx -$ GROUPNAME=xxx -# ... - -$ az group create --name $GROUPNAME --location $LOCATION -``` - -Your storage account (below) will need to use the same `GROUPNAME` and -`LOCATION`. - -### Create a Storage Account - -We will need to create a storage account where your Packer artifacts will be -stored. We will create a `LRS` storage account which is the least expensive -price/GB at the time of writing. - -``` shell -$ az storage account create \ - --name STORAGENAME - --resource-group $GROUPNAME \ - --location $LOCATION \ - --sku Standard_LRS \ - --kind Storage -``` - --> `LRS` and `Standard_LRS` are meant as literal "LRS" or "Standard\_LRS" -and not as variables. - -Make sure that `GROUPNAME` and `LOCATION` are the same as above. Also, ensure -that `GROUPNAME` is less than 24 characters long and contains only lowercase -letters and numbers. 
- -### Create a Service Principal - -A service principal acts on behalf of an application (Packer) on your Azure -subscription. To create an application and service principal for use with -Packer, run the below command specifying the subscription. This will grant -Packer the contributor role to the subscription. -The output of this command is your service principal credentials, save these in -a safe place as you will need these to configure Packer. - -``` shell -az ad sp create-for-rbac -n "Packer" --role contributor \ - --scopes /subscriptions/{SubID} -``` - -The service principal credentials. - -``` shell -{ - "appId": "AppId", - "displayName": "Packer", - "name": "http://Packer", - "password": "Password", - "tenant": "TenantId" -} -``` - -There are a lot of pre-defined roles and you can define your own with more -granular permissions, though this is out of scope. You can see a list of -pre-configured roles via: - -``` shell -$ az role definition list --output table --query '[].{name:roleName, description:description}' -``` - -If you would rather use a certificate to autenticate your service principal, -please follow the [Azure Active Directory documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-certificate-credentials#register-your-certificate-with-azure-ad). - -### Configuring Packer - -Now (finally) everything has been setup in Azure and our service principal has -been created. You can use the output from creating your service principal in -your template. Use the value from the `appId` field above as a value for -`client_id` in your configuration and set `client_secret` to the `password` -value from above. diff --git a/website/source/docs/builders/azure.html.md b/website/source/docs/builders/azure.html.md new file mode 100644 index 000000000..62bfdb25b --- /dev/null +++ b/website/source/docs/builders/azure.html.md @@ -0,0 +1,101 @@ +--- +description: | + Packer is able to create Azure VM images. 
To achieve this, Packer comes with + multiple builders depending on the strategy you want to use to build the images. +layout: docs +page_title: 'Azure images - Builders' +sidebar_current: 'docs-builders-azure' +--- + +# Azure Virtual Machine Image Builders + +Packer can create Azure virtual machine images through a variety of ways +depending on the strategy that you want to use for building the images. +Packer supports the following builders for Azure images at the moment: + +- [azure-arm](/docs/builders/azure-arm.html) - Uses Azure Resource + Manager (ARM) to launch a virtual machine (VM) from which a new image is + captured after provisioning. If in doubt, use this builder; it is the + easiest builder to get started with. + +- [azure-chroot](/docs/builders/azure-chroot.html) - Uses ARM to create + a managed disk that is attached to an existing Azure VM that Packer is + running on. Provisioning leverages [Chroot](https://en.wikipedia.org/wiki/Chroot) + environment. After provisioning, the disk is detached and an image is created + from this disk. This is an **advanced builder and should not be used by + newcomers**. However, it is also the fastest way to build a VM image in + Azure. + +-> **Don't know which builder to use?** If in doubt, use the [azure-arm +builder](/docs/builders/azure-arm.html). It is much easier to use. + +# Authentication for Azure + +The Packer Azure builders provide a couple of ways to authenticate to Azure. The +following methods are available and are explained below: + +- Azure Active Directory interactive login. Interactive login is available + for the Public and US Gov clouds only. +- Azure Managed Identity +- Azure Active Directory Service Principal + +-> **Don't know which authentication method to use?** Go with interactive +login to try out the builders. If you need packer to run automatically, +switch to using a Service Principal or Managed Identity. 
+ +No matter which method you choose, the identity you use will need the +appropriate permissions on Azure resources for Packer to operate. The minimal +set of permissions is highly dependent on the builder and its configuration. +An easy way to get started is to assign the identity the `Contributor` role at +the subscription level. + +## Azure Active Directory interactive login + +If your organization allows it, you can use a command line interactive login +method based on oAuth 'device code flow'. Packer will select this method when +you only specify a `subscription_id` in your builder configuration. When you +run Packer, it will ask you to visit a web site and input a code. This web site +will then authenticate you, satisfying any two-factor authentication policies +that your organization might have. The tokens are cached under the `.azure/packer` +directory in your home directory and will be reused if they are still valid +on subsequent runs. + +## Azure Managed Identity + +Azure provides the option to assign an identity to a virtual machine ([Azure +documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)). Packer can +use a system assigned identity for a VM where Packer is running to orchestrate +Azure API's. This is the default behavior and requires no configuration +properties to be set. It does, however, require that you run Packer on an +Azure VM. + +To enable this method, [let Azure assign a system-assigned identity to your VM](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). +Then, [grant your VM access to the appropriate resources](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/howto-assign-access-portal). +To get started, try assigning the `Contributor` role at the subscription level to +your VM. 
Then, when you discover your exact scenario, scope the permissions +appropriately or isolate Packer builds in a separate subscription. + +## Azure Active Directory Service Principal + +Azure Active Directory models service accounts as 'Service Principal' (SP) +objects. An SP represents an application accessing your Azure resources. It +is identified by a client ID (aka application ID) and can use a password or a +certificate to authenticate. To use a Service Principal, specify the +`subscription_id` and `client_id`, as well as either `client_secret`, +`client_cert_path` or `client_jwt`. Each of these last three represents a different +way to authenticate the SP to AAD: + +- `client_secret` - allows the user to provide a password/secret registered + for the AAD SP. +- `client_cert_path` - allows usage of a certificate to be used to + authenticate as the specified AAD SP. +- `client_jwt` - For advanced scenarios where the user cannot provide Packer + the full certificate, they can provide a JWT bearer token for client auth + (RFC 7523, Sec. 2.2). These bearer tokens are created and signed using a + certificate registered in AAD and have a user-chosen expiry time, limiting + the validity of the token. This is also the underlying mechanism used to + authenticate when using `client_cert_path`. + +To create a service principal, you can follow [the Azure documentation on this +subject](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest). + diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 6593ae9f9..878c6653a 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -94,8 +94,11 @@ > Azure From e05d6a223e68ffc8f13e785f2c9834bca0114204 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 22:35:16 +0000 Subject: [PATCH 33/55] fixup! 
Documentation update --- website/source/netlify-redirects | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/netlify-redirects b/website/source/netlify-redirects index 04652a4e2..ef0074a7d 100644 --- a/website/source/netlify-redirects +++ b/website/source/netlify-redirects @@ -7,7 +7,7 @@ /docs/command-line/machine-readable.html /docs/commands/index.html /docs/command-line/introduction.html /docs/commands/index.html /docs/templates/introduction.html /docs/templates/index.html -/docs/builders/azure-arm.html /docs/builders/azure.html +/docs/builders/azure-setup.html /docs/builders/azure.html /docs/templates/veewee-to-packer.html /guides/veewee-to-packer.html /docs/extend/developing-plugins.html /docs/extending/plugins.html /docs/extending/developing-plugins.html /docs/extending/plugins.html From 8c73450f3dc35df561a4d400a6305849c92210ca Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 22:44:15 +0000 Subject: [PATCH 34/55] fixup! Add documentation to config struct --- builder/azure/chroot/builder.go | 2 +- .../partials/builder/azure/chroot/_Config-required.html.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index c476b2fd1..e72507441 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -37,7 +37,7 @@ type Config struct { // When set to `true`, starts with an empty, unpartitioned disk. Defaults to `false`. FromScratch bool `mapstructure:"from_scratch"` - // Either a managed disk resourced ID or a publisher:offer:sku:version specifier for plaform image sources. + // Either a managed disk resource ID or a publisher:offer:sku:version specifier for platform image sources. 
Source string `mapstructure:"source" required:"true"` sourceType sourceType diff --git a/website/source/partials/builder/azure/chroot/_Config-required.html.md b/website/source/partials/builder/azure/chroot/_Config-required.html.md index e24734560..8402b8c73 100644 --- a/website/source/partials/builder/azure/chroot/_Config-required.html.md +++ b/website/source/partials/builder/azure/chroot/_Config-required.html.md @@ -1,6 +1,6 @@ -- `source` (string) - Either a managed disk resourced ID or a publisher:offer:sku:version specifier for plaform image sources. +- `source` (string) - Either a managed disk resource ID or a publisher:offer:sku:version specifier for platform image sources. - `image_resource_id` (string) - The image to create using this build. \ No newline at end of file From b9c726417f8540d456ebe69c66d09ff848f5d99d Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 23:34:02 +0000 Subject: [PATCH 35/55] update modules --- go.mod | 1 + go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/go.mod b/go.mod index 327a88162..e75f6325c 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( github.com/digitalocean/go-libvirt v0.0.0-20190626172931-4d226dd6c437 // indirect github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8 github.com/digitalocean/godo v1.11.1 + github.com/dimchansky/utfbom v1.1.0 // indirect github.com/dnaeon/go-vcr v1.0.0 // indirect github.com/docker/docker v0.0.0-20180422163414-57142e89befe // indirect github.com/dustin/go-humanize v1.0.0 // indirect diff --git a/go.sum b/go.sum index 6735fbbb6..e3e08c8ce 100644 --- a/go.sum +++ b/go.sum @@ -93,6 +93,8 @@ github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8 h1:N7nH2py78L github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8/go.mod h1:/YnlngP1PARC0SKAZx6kaAEMOp8bNTQGqS+Ka3MctNI= github.com/digitalocean/godo v1.11.1 h1:OsTh37YFKk+g6DnAOrkXJ9oDArTkRx5UTkBJ2EWAO38= github.com/digitalocean/godo v1.11.1/go.mod 
h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.0 h1:1QZ+ahihvRvppcJnFvuoHAdnZTf1PqKjO4Ftr1cfQTo= github.com/dnaeon/go-vcr v1.0.0/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/docker v0.0.0-20180422163414-57142e89befe h1:VW8TnWi0CZgg7oCv0wH6evNwkzcJg/emnw4HrVIWws4= From cf8688ec40b405489b19cb20a07f37117e5235db Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 23:41:22 +0000 Subject: [PATCH 36/55] doc updates after Dan's review --- .../docs/builders/azure-chroot.html.md.erb | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index 6a67888d9..ab5af6cb8 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -29,8 +29,8 @@ which is much easier to use. This builder works by creating a new MD from either an existing source or from scratch and attaching it to the (already existing) Azure VM where Packer is running. Once attached, a [chroot](https://en.wikipedia.org/wiki/Chroot) is set -up and made available to the provisioners. After provisioning, the MD is -detached, snapshotted and a MD image is created. +up and made available to the [provisioners](/docs/provisioners/index.html). +After provisioning, the MD is detached, snapshotted and a MD image is created. Using this process, minutes can be shaved off the image creation process because Packer does not need to launch a VM instance. @@ -38,24 +38,26 @@ because Packer does not need to launch a VM instance. There are some restrictions however: * The host system must be a similar system (generally the same OS version, kernel versions, etc.) 
as the image being built. -* If the source is a user image or managed disk, it must be made available in - the same region as the host system. ([name=Paul Meyer]: we could work around - this restriction by doing a cross-region copy, but that takes away the main - speed advantage.) +* If the source is a managed disk, it must be made available in the same + region as the host system. * The host system SKU has to allow for all of the specified disks to be attached. ## Configuration Reference There are many configuration options available for the builder. We'll start -withauthentication parameters, then go over the builder specific options. +with authentication parameters, then go over the Azure chroot builder specific +options. ### Authentication options -The Azure builders [share this configuration](/docs/builders/azure.html). +None of the authentication options are required, but depending on which +ones are specified a different authentication method may be used. See the +[shared Azure builders documentation](/docs/builders/azure.html) for more +information. 
<%= partial "partials/builder/azure/common/client/_Config-not-required.html" %> -### Builder specific options +### Azure chroot builder specific options #### Required: <%= partial "partials/builder/azure/chroot/_Config-required.html" %> From 37931c551f866c1dc0f7349379870965a5727358 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 2 Oct 2019 23:52:55 +0000 Subject: [PATCH 37/55] add example --- examples/azure/debian-chroot.json | 26 +++++++++++++ .../docs/builders/azure-chroot.html.md.erb | 37 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 examples/azure/debian-chroot.json diff --git a/examples/azure/debian-chroot.json b/examples/azure/debian-chroot.json new file mode 100644 index 000000000..4b29632b1 --- /dev/null +++ b/examples/azure/debian-chroot.json @@ -0,0 +1,26 @@ +{ + "variables": { + "client_id": "{{env `ARM_CLIENT_ID`}}", + "client_secret": "{{env `ARM_CLIENT_SECRET`}}", + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" + }, + "builders": [{ + "type": "azure-chroot", + + "client_id": "{{user `client_id`}}", + "client_secret": "{{user `client_secret`}}", + "subscription_id": "{{user `subscription_id`}}", + + "image_resource_id": "/subscriptions/{{user `subscription_id`}}/resourceGroups/{{user `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", + + "source": "credativ:Debian:9:latest" + }], + "provisioners": [{ + "inline": [ + "apt-get update", + "apt-get upgrade -y" + ], + "inline_shebang": "/bin/sh -x", + "type": "shell" + }] +} diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index ab5af6cb8..71b488deb 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -101,3 +101,40 @@ mounts `/prod` and `/dev`: - The mount directory. +## Example +Here is an example that creates a Debian image with updated packages. 
Specify +all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, +`ARM_SUBSCRIPTION_ID`) to use a service principal, specify only `ARM_SUBSCRIPTION_ID` +to use interactive login or leave them empty to use the system-assigned identity +of the VM you run Packer on. +The identity you choose should have permission to create disks and images and also +to update your VM. + +``` json +{ + "variables": { + "client_id": "{{env `ARM_CLIENT_ID`}}", + "client_secret": "{{env `ARM_CLIENT_SECRET`}}", + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" + }, + "builders": [{ + "type": "azure-chroot", + + "client_id": "{{user `client_id`}}", + "client_secret": "{{user `client_secret`}}", + "subscription_id": "{{user `subscription_id`}}", + + "image_resource_id": "/subscriptions/{{user `subscription_id`}}/resourceGroups/{{user `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", + + "source": "credativ:Debian:9:latest" + }], + "provisioners": [{ + "inline": [ + "apt-get update", + "apt-get upgrade -y" + ], + "inline_shebang": "/bin/sh -x", + "type": "shell" + }] +} +``` \ No newline at end of file From 0694f2635b4daba8d708daf9485036f514f22f82 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 3 Oct 2019 04:17:54 +0000 Subject: [PATCH 38/55] update vendor directory --- .../compute/mgmt/compute/computeapi/models.go | 46 + .../latest/compute/mgmt/compute/models.go | 1397 +++++++++++++++++ .../compute/computeapi/interfaces.go | 299 ++++ .../go-autorest/autorest/azure/auth/auth.go | 712 +++++++++ .../go-autorest/autorest/azure/cli/profile.go | 79 + .../go-autorest/autorest/azure/cli/token.go | 170 ++ .../github.com/dimchansky/utfbom/.gitignore | 37 + .../github.com/dimchansky/utfbom/.travis.yml | 18 + vendor/github.com/dimchansky/utfbom/LICENSE | 201 +++ vendor/github.com/dimchansky/utfbom/README.md | 66 + vendor/github.com/dimchansky/utfbom/go.mod | 1 + vendor/github.com/dimchansky/utfbom/utfbom.go | 192 +++ 
.../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 ++ vendor/golang.org/x/crypto/pkcs12/errors.go | 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 ++++ vendor/golang.org/x/crypto/pkcs12/mac.go | 45 + vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 ++ vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 349 ++++ vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 + vendor/modules.txt | 9 + 21 files changed, 4323 insertions(+) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go create mode 100644 vendor/github.com/dimchansky/utfbom/.gitignore create mode 100644 vendor/github.com/dimchansky/utfbom/.travis.yml create mode 100644 vendor/github.com/dimchansky/utfbom/LICENSE create mode 100644 vendor/github.com/dimchansky/utfbom/README.md create mode 100644 vendor/github.com/dimchansky/utfbom/go.mod create mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi/models.go new file mode 100644 index 000000000..2a5892f86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi/models.go @@ -0,0 +1,46 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package computeapi + +import original "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi" + +type AvailabilitySetsClientAPI = original.AvailabilitySetsClientAPI +type ContainerServicesClientAPI = original.ContainerServicesClientAPI +type DisksClientAPI = original.DisksClientAPI +type GalleriesClientAPI = original.GalleriesClientAPI +type GalleryImageVersionsClientAPI = original.GalleryImageVersionsClientAPI +type GalleryImagesClientAPI = original.GalleryImagesClientAPI +type ImagesClientAPI = original.ImagesClientAPI +type LogAnalyticsClientAPI = original.LogAnalyticsClientAPI +type OperationsClientAPI = original.OperationsClientAPI +type ProximityPlacementGroupsClientAPI = original.ProximityPlacementGroupsClientAPI +type ResourceSkusClientAPI = original.ResourceSkusClientAPI +type SnapshotsClientAPI = original.SnapshotsClientAPI +type UsageClientAPI = original.UsageClientAPI +type VirtualMachineExtensionImagesClientAPI = original.VirtualMachineExtensionImagesClientAPI +type VirtualMachineExtensionsClientAPI = original.VirtualMachineExtensionsClientAPI +type VirtualMachineImagesClientAPI = original.VirtualMachineImagesClientAPI +type VirtualMachineRunCommandsClientAPI = original.VirtualMachineRunCommandsClientAPI +type VirtualMachineScaleSetExtensionsClientAPI = original.VirtualMachineScaleSetExtensionsClientAPI +type VirtualMachineScaleSetRollingUpgradesClientAPI = original.VirtualMachineScaleSetRollingUpgradesClientAPI +type VirtualMachineScaleSetVMsClientAPI = original.VirtualMachineScaleSetVMsClientAPI +type VirtualMachineScaleSetsClientAPI = original.VirtualMachineScaleSetsClientAPI +type VirtualMachineSizesClientAPI = original.VirtualMachineSizesClientAPI +type VirtualMachinesClientAPI = original.VirtualMachinesClientAPI diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/models.go 
b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/models.go new file mode 100644 index 000000000..cc724490f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/models.go @@ -0,0 +1,1397 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package compute + +import ( + "context" + + original "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" +) + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type AccessLevel = original.AccessLevel + +const ( + None AccessLevel = original.None + Read AccessLevel = original.Read + Write AccessLevel = original.Write +) + +type AggregatedReplicationState = original.AggregatedReplicationState + +const ( + Completed AggregatedReplicationState = original.Completed + Failed AggregatedReplicationState = original.Failed + InProgress AggregatedReplicationState = original.InProgress + Unknown AggregatedReplicationState = original.Unknown +) + +type AvailabilitySetSkuTypes = original.AvailabilitySetSkuTypes + +const ( + Aligned AvailabilitySetSkuTypes = original.Aligned + Classic AvailabilitySetSkuTypes = original.Classic +) + +type CachingTypes = original.CachingTypes + +const ( + CachingTypesNone CachingTypes = original.CachingTypesNone + CachingTypesReadOnly 
CachingTypes = original.CachingTypesReadOnly + CachingTypesReadWrite CachingTypes = original.CachingTypesReadWrite +) + +type ComponentNames = original.ComponentNames + +const ( + MicrosoftWindowsShellSetup ComponentNames = original.MicrosoftWindowsShellSetup +) + +type ContainerServiceOrchestratorTypes = original.ContainerServiceOrchestratorTypes + +const ( + Custom ContainerServiceOrchestratorTypes = original.Custom + DCOS ContainerServiceOrchestratorTypes = original.DCOS + Kubernetes ContainerServiceOrchestratorTypes = original.Kubernetes + Swarm ContainerServiceOrchestratorTypes = original.Swarm +) + +type ContainerServiceVMSizeTypes = original.ContainerServiceVMSizeTypes + +const ( + StandardA0 ContainerServiceVMSizeTypes = original.StandardA0 + StandardA1 ContainerServiceVMSizeTypes = original.StandardA1 + StandardA10 ContainerServiceVMSizeTypes = original.StandardA10 + StandardA11 ContainerServiceVMSizeTypes = original.StandardA11 + StandardA2 ContainerServiceVMSizeTypes = original.StandardA2 + StandardA3 ContainerServiceVMSizeTypes = original.StandardA3 + StandardA4 ContainerServiceVMSizeTypes = original.StandardA4 + StandardA5 ContainerServiceVMSizeTypes = original.StandardA5 + StandardA6 ContainerServiceVMSizeTypes = original.StandardA6 + StandardA7 ContainerServiceVMSizeTypes = original.StandardA7 + StandardA8 ContainerServiceVMSizeTypes = original.StandardA8 + StandardA9 ContainerServiceVMSizeTypes = original.StandardA9 + StandardD1 ContainerServiceVMSizeTypes = original.StandardD1 + StandardD11 ContainerServiceVMSizeTypes = original.StandardD11 + StandardD11V2 ContainerServiceVMSizeTypes = original.StandardD11V2 + StandardD12 ContainerServiceVMSizeTypes = original.StandardD12 + StandardD12V2 ContainerServiceVMSizeTypes = original.StandardD12V2 + StandardD13 ContainerServiceVMSizeTypes = original.StandardD13 + StandardD13V2 ContainerServiceVMSizeTypes = original.StandardD13V2 + StandardD14 ContainerServiceVMSizeTypes = original.StandardD14 + 
StandardD14V2 ContainerServiceVMSizeTypes = original.StandardD14V2 + StandardD1V2 ContainerServiceVMSizeTypes = original.StandardD1V2 + StandardD2 ContainerServiceVMSizeTypes = original.StandardD2 + StandardD2V2 ContainerServiceVMSizeTypes = original.StandardD2V2 + StandardD3 ContainerServiceVMSizeTypes = original.StandardD3 + StandardD3V2 ContainerServiceVMSizeTypes = original.StandardD3V2 + StandardD4 ContainerServiceVMSizeTypes = original.StandardD4 + StandardD4V2 ContainerServiceVMSizeTypes = original.StandardD4V2 + StandardD5V2 ContainerServiceVMSizeTypes = original.StandardD5V2 + StandardDS1 ContainerServiceVMSizeTypes = original.StandardDS1 + StandardDS11 ContainerServiceVMSizeTypes = original.StandardDS11 + StandardDS12 ContainerServiceVMSizeTypes = original.StandardDS12 + StandardDS13 ContainerServiceVMSizeTypes = original.StandardDS13 + StandardDS14 ContainerServiceVMSizeTypes = original.StandardDS14 + StandardDS2 ContainerServiceVMSizeTypes = original.StandardDS2 + StandardDS3 ContainerServiceVMSizeTypes = original.StandardDS3 + StandardDS4 ContainerServiceVMSizeTypes = original.StandardDS4 + StandardG1 ContainerServiceVMSizeTypes = original.StandardG1 + StandardG2 ContainerServiceVMSizeTypes = original.StandardG2 + StandardG3 ContainerServiceVMSizeTypes = original.StandardG3 + StandardG4 ContainerServiceVMSizeTypes = original.StandardG4 + StandardG5 ContainerServiceVMSizeTypes = original.StandardG5 + StandardGS1 ContainerServiceVMSizeTypes = original.StandardGS1 + StandardGS2 ContainerServiceVMSizeTypes = original.StandardGS2 + StandardGS3 ContainerServiceVMSizeTypes = original.StandardGS3 + StandardGS4 ContainerServiceVMSizeTypes = original.StandardGS4 + StandardGS5 ContainerServiceVMSizeTypes = original.StandardGS5 +) + +type DiffDiskOptions = original.DiffDiskOptions + +const ( + Local DiffDiskOptions = original.Local +) + +type DiskCreateOption = original.DiskCreateOption + +const ( + Attach DiskCreateOption = original.Attach + Copy DiskCreateOption 
= original.Copy + Empty DiskCreateOption = original.Empty + FromImage DiskCreateOption = original.FromImage + Import DiskCreateOption = original.Import + Restore DiskCreateOption = original.Restore + Upload DiskCreateOption = original.Upload +) + +type DiskCreateOptionTypes = original.DiskCreateOptionTypes + +const ( + DiskCreateOptionTypesAttach DiskCreateOptionTypes = original.DiskCreateOptionTypesAttach + DiskCreateOptionTypesEmpty DiskCreateOptionTypes = original.DiskCreateOptionTypesEmpty + DiskCreateOptionTypesFromImage DiskCreateOptionTypes = original.DiskCreateOptionTypesFromImage +) + +type DiskState = original.DiskState + +const ( + ActiveSAS DiskState = original.ActiveSAS + ActiveUpload DiskState = original.ActiveUpload + Attached DiskState = original.Attached + ReadyToUpload DiskState = original.ReadyToUpload + Reserved DiskState = original.Reserved + Unattached DiskState = original.Unattached +) + +type DiskStorageAccountTypes = original.DiskStorageAccountTypes + +const ( + PremiumLRS DiskStorageAccountTypes = original.PremiumLRS + StandardLRS DiskStorageAccountTypes = original.StandardLRS + StandardSSDLRS DiskStorageAccountTypes = original.StandardSSDLRS + UltraSSDLRS DiskStorageAccountTypes = original.UltraSSDLRS +) + +type HostCaching = original.HostCaching + +const ( + HostCachingNone HostCaching = original.HostCachingNone + HostCachingReadOnly HostCaching = original.HostCachingReadOnly + HostCachingReadWrite HostCaching = original.HostCachingReadWrite +) + +type HyperVGeneration = original.HyperVGeneration + +const ( + V1 HyperVGeneration = original.V1 + V2 HyperVGeneration = original.V2 +) + +type HyperVGenerationTypes = original.HyperVGenerationTypes + +const ( + HyperVGenerationTypesV1 HyperVGenerationTypes = original.HyperVGenerationTypesV1 + HyperVGenerationTypesV2 HyperVGenerationTypes = original.HyperVGenerationTypesV2 +) + +type IPVersion = original.IPVersion + +const ( + IPv4 IPVersion = original.IPv4 + IPv6 IPVersion = original.IPv6 +) + 
+type InstanceViewTypes = original.InstanceViewTypes + +const ( + InstanceView InstanceViewTypes = original.InstanceView +) + +type IntervalInMins = original.IntervalInMins + +const ( + FiveMins IntervalInMins = original.FiveMins + SixtyMins IntervalInMins = original.SixtyMins + ThirtyMins IntervalInMins = original.ThirtyMins + ThreeMins IntervalInMins = original.ThreeMins +) + +type MaintenanceOperationResultCodeTypes = original.MaintenanceOperationResultCodeTypes + +const ( + MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = original.MaintenanceOperationResultCodeTypesMaintenanceAborted + MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = original.MaintenanceOperationResultCodeTypesMaintenanceCompleted + MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = original.MaintenanceOperationResultCodeTypesNone + MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = original.MaintenanceOperationResultCodeTypesRetryLater +) + +type OperatingSystemStateTypes = original.OperatingSystemStateTypes + +const ( + Generalized OperatingSystemStateTypes = original.Generalized + Specialized OperatingSystemStateTypes = original.Specialized +) + +type OperatingSystemTypes = original.OperatingSystemTypes + +const ( + Linux OperatingSystemTypes = original.Linux + Windows OperatingSystemTypes = original.Windows +) + +type PassNames = original.PassNames + +const ( + OobeSystem PassNames = original.OobeSystem +) + +type ProtocolTypes = original.ProtocolTypes + +const ( + HTTP ProtocolTypes = original.HTTP + HTTPS ProtocolTypes = original.HTTPS +) + +type ProvisioningState = original.ProvisioningState + +const ( + ProvisioningStateCreating ProvisioningState = original.ProvisioningStateCreating + ProvisioningStateDeleting ProvisioningState = original.ProvisioningStateDeleting + ProvisioningStateFailed ProvisioningState = original.ProvisioningStateFailed 
+ ProvisioningStateMigrating ProvisioningState = original.ProvisioningStateMigrating + ProvisioningStateSucceeded ProvisioningState = original.ProvisioningStateSucceeded + ProvisioningStateUpdating ProvisioningState = original.ProvisioningStateUpdating +) + +type ProvisioningState1 = original.ProvisioningState1 + +const ( + ProvisioningState1Creating ProvisioningState1 = original.ProvisioningState1Creating + ProvisioningState1Deleting ProvisioningState1 = original.ProvisioningState1Deleting + ProvisioningState1Failed ProvisioningState1 = original.ProvisioningState1Failed + ProvisioningState1Migrating ProvisioningState1 = original.ProvisioningState1Migrating + ProvisioningState1Succeeded ProvisioningState1 = original.ProvisioningState1Succeeded + ProvisioningState1Updating ProvisioningState1 = original.ProvisioningState1Updating +) + +type ProvisioningState2 = original.ProvisioningState2 + +const ( + ProvisioningState2Creating ProvisioningState2 = original.ProvisioningState2Creating + ProvisioningState2Deleting ProvisioningState2 = original.ProvisioningState2Deleting + ProvisioningState2Failed ProvisioningState2 = original.ProvisioningState2Failed + ProvisioningState2Migrating ProvisioningState2 = original.ProvisioningState2Migrating + ProvisioningState2Succeeded ProvisioningState2 = original.ProvisioningState2Succeeded + ProvisioningState2Updating ProvisioningState2 = original.ProvisioningState2Updating +) + +type ProximityPlacementGroupType = original.ProximityPlacementGroupType + +const ( + Standard ProximityPlacementGroupType = original.Standard + Ultra ProximityPlacementGroupType = original.Ultra +) + +type ReplicationState = original.ReplicationState + +const ( + ReplicationStateCompleted ReplicationState = original.ReplicationStateCompleted + ReplicationStateFailed ReplicationState = original.ReplicationStateFailed + ReplicationStateReplicating ReplicationState = original.ReplicationStateReplicating + ReplicationStateUnknown ReplicationState = 
original.ReplicationStateUnknown +) + +type ReplicationStatusTypes = original.ReplicationStatusTypes + +const ( + ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = original.ReplicationStatusTypesReplicationStatus +) + +type ResourceIdentityType = original.ResourceIdentityType + +const ( + ResourceIdentityTypeNone ResourceIdentityType = original.ResourceIdentityTypeNone + ResourceIdentityTypeSystemAssigned ResourceIdentityType = original.ResourceIdentityTypeSystemAssigned + ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = original.ResourceIdentityTypeSystemAssignedUserAssigned + ResourceIdentityTypeUserAssigned ResourceIdentityType = original.ResourceIdentityTypeUserAssigned +) + +type ResourceSkuCapacityScaleType = original.ResourceSkuCapacityScaleType + +const ( + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = original.ResourceSkuCapacityScaleTypeAutomatic + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = original.ResourceSkuCapacityScaleTypeManual + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = original.ResourceSkuCapacityScaleTypeNone +) + +type ResourceSkuRestrictionsReasonCode = original.ResourceSkuRestrictionsReasonCode + +const ( + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = original.NotAvailableForSubscription + QuotaID ResourceSkuRestrictionsReasonCode = original.QuotaID +) + +type ResourceSkuRestrictionsType = original.ResourceSkuRestrictionsType + +const ( + Location ResourceSkuRestrictionsType = original.Location + Zone ResourceSkuRestrictionsType = original.Zone +) + +type RollingUpgradeActionType = original.RollingUpgradeActionType + +const ( + Cancel RollingUpgradeActionType = original.Cancel + Start RollingUpgradeActionType = original.Start +) + +type RollingUpgradeStatusCode = original.RollingUpgradeStatusCode + +const ( + RollingUpgradeStatusCodeCancelled RollingUpgradeStatusCode = original.RollingUpgradeStatusCodeCancelled + 
RollingUpgradeStatusCodeCompleted RollingUpgradeStatusCode = original.RollingUpgradeStatusCodeCompleted + RollingUpgradeStatusCodeFaulted RollingUpgradeStatusCode = original.RollingUpgradeStatusCodeFaulted + RollingUpgradeStatusCodeRollingForward RollingUpgradeStatusCode = original.RollingUpgradeStatusCodeRollingForward +) + +type SettingNames = original.SettingNames + +const ( + AutoLogon SettingNames = original.AutoLogon + FirstLogonCommands SettingNames = original.FirstLogonCommands +) + +type SnapshotStorageAccountTypes = original.SnapshotStorageAccountTypes + +const ( + SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = original.SnapshotStorageAccountTypesPremiumLRS + SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = original.SnapshotStorageAccountTypesStandardLRS + SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = original.SnapshotStorageAccountTypesStandardZRS +) + +type StatusLevelTypes = original.StatusLevelTypes + +const ( + Error StatusLevelTypes = original.Error + Info StatusLevelTypes = original.Info + Warning StatusLevelTypes = original.Warning +) + +type StorageAccountType = original.StorageAccountType + +const ( + StorageAccountTypeStandardLRS StorageAccountType = original.StorageAccountTypeStandardLRS + StorageAccountTypeStandardZRS StorageAccountType = original.StorageAccountTypeStandardZRS +) + +type StorageAccountTypes = original.StorageAccountTypes + +const ( + StorageAccountTypesPremiumLRS StorageAccountTypes = original.StorageAccountTypesPremiumLRS + StorageAccountTypesStandardLRS StorageAccountTypes = original.StorageAccountTypesStandardLRS + StorageAccountTypesStandardSSDLRS StorageAccountTypes = original.StorageAccountTypesStandardSSDLRS + StorageAccountTypesUltraSSDLRS StorageAccountTypes = original.StorageAccountTypesUltraSSDLRS +) + +type UpgradeMode = original.UpgradeMode + +const ( + Automatic UpgradeMode = original.Automatic + Manual UpgradeMode = original.Manual + Rolling 
UpgradeMode = original.Rolling +) + +type UpgradeOperationInvoker = original.UpgradeOperationInvoker + +const ( + UpgradeOperationInvokerPlatform UpgradeOperationInvoker = original.UpgradeOperationInvokerPlatform + UpgradeOperationInvokerUnknown UpgradeOperationInvoker = original.UpgradeOperationInvokerUnknown + UpgradeOperationInvokerUser UpgradeOperationInvoker = original.UpgradeOperationInvokerUser +) + +type UpgradeState = original.UpgradeState + +const ( + UpgradeStateCancelled UpgradeState = original.UpgradeStateCancelled + UpgradeStateCompleted UpgradeState = original.UpgradeStateCompleted + UpgradeStateFaulted UpgradeState = original.UpgradeStateFaulted + UpgradeStateRollingForward UpgradeState = original.UpgradeStateRollingForward +) + +type VirtualMachineEvictionPolicyTypes = original.VirtualMachineEvictionPolicyTypes + +const ( + Deallocate VirtualMachineEvictionPolicyTypes = original.Deallocate + Delete VirtualMachineEvictionPolicyTypes = original.Delete +) + +type VirtualMachinePriorityTypes = original.VirtualMachinePriorityTypes + +const ( + Low VirtualMachinePriorityTypes = original.Low + Regular VirtualMachinePriorityTypes = original.Regular +) + +type VirtualMachineScaleSetSkuScaleType = original.VirtualMachineScaleSetSkuScaleType + +const ( + VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = original.VirtualMachineScaleSetSkuScaleTypeAutomatic + VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = original.VirtualMachineScaleSetSkuScaleTypeNone +) + +type VirtualMachineSizeTypes = original.VirtualMachineSizeTypes + +const ( + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesBasicA0 + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesBasicA1 + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesBasicA2 + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = 
original.VirtualMachineSizeTypesBasicA3 + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesBasicA4 + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA0 + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA1 + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA10 + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA11 + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA1V2 + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA2 + VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA2mV2 + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA2V2 + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA3 + VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA4 + VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA4mV2 + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA4V2 + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA5 + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA6 + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA7 + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA8 + VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA8mV2 + VirtualMachineSizeTypesStandardA8V2 
VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA8V2 + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardA9 + VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB1ms + VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB1s + VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB2ms + VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB2s + VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB4ms + VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardB8ms + VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD1 + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD11 + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD11V2 + VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD12 + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD12V2 + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD13 + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD13V2 + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD14 + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD14V2 + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD15V2 + VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = 
original.VirtualMachineSizeTypesStandardD16sV3 + VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD16V3 + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD1V2 + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD2 + VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD2sV3 + VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD2V2 + VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD2V3 + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD3 + VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD32sV3 + VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD32V3 + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD3V2 + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD4 + VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD4sV3 + VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD4V2 + VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD4V3 + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD5V2 + VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD64sV3 + VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD64V3 + VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = 
original.VirtualMachineSizeTypesStandardD8sV3 + VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardD8V3 + VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS1 + VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS11 + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS11V2 + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS12 + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS12V2 + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS13 + VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS132V2 + VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS134V2 + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS13V2 + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS14 + VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS144V2 + VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS148V2 + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS14V2 + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS15V2 + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS1V2 + VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS2 + VirtualMachineSizeTypesStandardDS2V2 
VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS2V2 + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS3 + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS3V2 + VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS4 + VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS4V2 + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardDS5V2 + VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE16sV3 + VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE16V3 + VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE2sV3 + VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE2V3 + VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE3216V3 + VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE328sV3 + VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE32sV3 + VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE32V3 + VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE4sV3 + VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE4V3 + VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE6416sV3 + VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE6432sV3 + 
VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE64sV3 + VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE64V3 + VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE8sV3 + VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardE8V3 + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF1 + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF16 + VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF16s + VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF16sV2 + VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF1s + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF2 + VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF2s + VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF2sV2 + VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF32sV2 + VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF4 + VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF4s + VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF4sV2 + VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF64sV2 + VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF72sV2 + 
VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF8 + VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF8s + VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardF8sV2 + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardG1 + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardG2 + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardG3 + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardG4 + VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardG5 + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS1 + VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS2 + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS3 + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS4 + VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS44 + VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS48 + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS5 + VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS516 + VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardGS58 + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardH16 + VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = 
original.VirtualMachineSizeTypesStandardH16m + VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardH16mr + VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardH16r + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardH8 + VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardH8m + VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardL16s + VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardL32s + VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardL4s + VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardL8s + VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM12832ms + VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM12864ms + VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM128ms + VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM128s + VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM6416ms + VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM6432ms + VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM64ms + VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardM64s + VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC12 + VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = 
original.VirtualMachineSizeTypesStandardNC12sV2 + VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC12sV3 + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24 + VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24r + VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24rsV2 + VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24rsV3 + VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24sV2 + VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC24sV3 + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC6 + VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC6sV2 + VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNC6sV3 + VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardND12s + VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardND24rs + VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardND24s + VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardND6s + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNV12 + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNV24 + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = original.VirtualMachineSizeTypesStandardNV6 +) + +type APIEntityReference = 
original.APIEntityReference +type APIError = original.APIError +type APIErrorBase = original.APIErrorBase +type AccessURI = original.AccessURI +type AdditionalCapabilities = original.AdditionalCapabilities +type AdditionalUnattendContent = original.AdditionalUnattendContent +type AutomaticOSUpgradePolicy = original.AutomaticOSUpgradePolicy +type AutomaticOSUpgradeProperties = original.AutomaticOSUpgradeProperties +type AvailabilitySet = original.AvailabilitySet +type AvailabilitySetListResult = original.AvailabilitySetListResult +type AvailabilitySetListResultIterator = original.AvailabilitySetListResultIterator +type AvailabilitySetListResultPage = original.AvailabilitySetListResultPage +type AvailabilitySetProperties = original.AvailabilitySetProperties +type AvailabilitySetUpdate = original.AvailabilitySetUpdate +type AvailabilitySetsClient = original.AvailabilitySetsClient +type BaseClient = original.BaseClient +type BootDiagnostics = original.BootDiagnostics +type BootDiagnosticsInstanceView = original.BootDiagnosticsInstanceView +type CloudError = original.CloudError +type ContainerService = original.ContainerService +type ContainerServiceAgentPoolProfile = original.ContainerServiceAgentPoolProfile +type ContainerServiceCustomProfile = original.ContainerServiceCustomProfile +type ContainerServiceDiagnosticsProfile = original.ContainerServiceDiagnosticsProfile +type ContainerServiceLinuxProfile = original.ContainerServiceLinuxProfile +type ContainerServiceListResult = original.ContainerServiceListResult +type ContainerServiceListResultIterator = original.ContainerServiceListResultIterator +type ContainerServiceListResultPage = original.ContainerServiceListResultPage +type ContainerServiceMasterProfile = original.ContainerServiceMasterProfile +type ContainerServiceOrchestratorProfile = original.ContainerServiceOrchestratorProfile +type ContainerServiceProperties = original.ContainerServiceProperties +type ContainerServiceSSHConfiguration = 
original.ContainerServiceSSHConfiguration +type ContainerServiceSSHPublicKey = original.ContainerServiceSSHPublicKey +type ContainerServiceServicePrincipalProfile = original.ContainerServiceServicePrincipalProfile +type ContainerServiceVMDiagnostics = original.ContainerServiceVMDiagnostics +type ContainerServiceWindowsProfile = original.ContainerServiceWindowsProfile +type ContainerServicesClient = original.ContainerServicesClient +type ContainerServicesCreateOrUpdateFuture = original.ContainerServicesCreateOrUpdateFuture +type ContainerServicesDeleteFuture = original.ContainerServicesDeleteFuture +type CreationData = original.CreationData +type DataDisk = original.DataDisk +type DataDiskImage = original.DataDiskImage +type DiagnosticsProfile = original.DiagnosticsProfile +type DiffDiskSettings = original.DiffDiskSettings +type Disallowed = original.Disallowed +type Disk = original.Disk +type DiskEncryptionSettings = original.DiskEncryptionSettings +type DiskInstanceView = original.DiskInstanceView +type DiskList = original.DiskList +type DiskListIterator = original.DiskListIterator +type DiskListPage = original.DiskListPage +type DiskProperties = original.DiskProperties +type DiskSku = original.DiskSku +type DiskUpdate = original.DiskUpdate +type DiskUpdateProperties = original.DiskUpdateProperties +type DisksClient = original.DisksClient +type DisksCreateOrUpdateFuture = original.DisksCreateOrUpdateFuture +type DisksDeleteFuture = original.DisksDeleteFuture +type DisksGrantAccessFuture = original.DisksGrantAccessFuture +type DisksRevokeAccessFuture = original.DisksRevokeAccessFuture +type DisksUpdateFuture = original.DisksUpdateFuture +type EncryptionSettingsCollection = original.EncryptionSettingsCollection +type EncryptionSettingsElement = original.EncryptionSettingsElement +type GalleriesClient = original.GalleriesClient +type GalleriesCreateOrUpdateFuture = original.GalleriesCreateOrUpdateFuture +type GalleriesDeleteFuture = original.GalleriesDeleteFuture 
+type Gallery = original.Gallery +type GalleryArtifactPublishingProfileBase = original.GalleryArtifactPublishingProfileBase +type GalleryArtifactSource = original.GalleryArtifactSource +type GalleryDataDiskImage = original.GalleryDataDiskImage +type GalleryDiskImage = original.GalleryDiskImage +type GalleryIdentifier = original.GalleryIdentifier +type GalleryImage = original.GalleryImage +type GalleryImageIdentifier = original.GalleryImageIdentifier +type GalleryImageList = original.GalleryImageList +type GalleryImageListIterator = original.GalleryImageListIterator +type GalleryImageListPage = original.GalleryImageListPage +type GalleryImageProperties = original.GalleryImageProperties +type GalleryImageVersion = original.GalleryImageVersion +type GalleryImageVersionList = original.GalleryImageVersionList +type GalleryImageVersionListIterator = original.GalleryImageVersionListIterator +type GalleryImageVersionListPage = original.GalleryImageVersionListPage +type GalleryImageVersionProperties = original.GalleryImageVersionProperties +type GalleryImageVersionPublishingProfile = original.GalleryImageVersionPublishingProfile +type GalleryImageVersionStorageProfile = original.GalleryImageVersionStorageProfile +type GalleryImageVersionsClient = original.GalleryImageVersionsClient +type GalleryImageVersionsCreateOrUpdateFuture = original.GalleryImageVersionsCreateOrUpdateFuture +type GalleryImageVersionsDeleteFuture = original.GalleryImageVersionsDeleteFuture +type GalleryImagesClient = original.GalleryImagesClient +type GalleryImagesCreateOrUpdateFuture = original.GalleryImagesCreateOrUpdateFuture +type GalleryImagesDeleteFuture = original.GalleryImagesDeleteFuture +type GalleryList = original.GalleryList +type GalleryListIterator = original.GalleryListIterator +type GalleryListPage = original.GalleryListPage +type GalleryOSDiskImage = original.GalleryOSDiskImage +type GalleryProperties = original.GalleryProperties +type GrantAccessData = original.GrantAccessData +type 
HardwareProfile = original.HardwareProfile +type Image = original.Image +type ImageDataDisk = original.ImageDataDisk +type ImageDiskReference = original.ImageDiskReference +type ImageListResult = original.ImageListResult +type ImageListResultIterator = original.ImageListResultIterator +type ImageListResultPage = original.ImageListResultPage +type ImageOSDisk = original.ImageOSDisk +type ImageProperties = original.ImageProperties +type ImagePurchasePlan = original.ImagePurchasePlan +type ImageReference = original.ImageReference +type ImageStorageProfile = original.ImageStorageProfile +type ImageUpdate = original.ImageUpdate +type ImagesClient = original.ImagesClient +type ImagesCreateOrUpdateFuture = original.ImagesCreateOrUpdateFuture +type ImagesDeleteFuture = original.ImagesDeleteFuture +type ImagesUpdateFuture = original.ImagesUpdateFuture +type InnerError = original.InnerError +type InstanceViewStatus = original.InstanceViewStatus +type KeyVaultAndKeyReference = original.KeyVaultAndKeyReference +type KeyVaultAndSecretReference = original.KeyVaultAndSecretReference +type KeyVaultKeyReference = original.KeyVaultKeyReference +type KeyVaultSecretReference = original.KeyVaultSecretReference +type LinuxConfiguration = original.LinuxConfiguration +type ListUsagesResult = original.ListUsagesResult +type ListUsagesResultIterator = original.ListUsagesResultIterator +type ListUsagesResultPage = original.ListUsagesResultPage +type ListVirtualMachineExtensionImage = original.ListVirtualMachineExtensionImage +type ListVirtualMachineImageResource = original.ListVirtualMachineImageResource +type LogAnalyticsClient = original.LogAnalyticsClient +type LogAnalyticsExportRequestRateByIntervalFuture = original.LogAnalyticsExportRequestRateByIntervalFuture +type LogAnalyticsExportThrottledRequestsFuture = original.LogAnalyticsExportThrottledRequestsFuture +type LogAnalyticsInputBase = original.LogAnalyticsInputBase +type LogAnalyticsOperationResult = 
original.LogAnalyticsOperationResult +type LogAnalyticsOutput = original.LogAnalyticsOutput +type MaintenanceRedeployStatus = original.MaintenanceRedeployStatus +type ManagedArtifact = original.ManagedArtifact +type ManagedDiskParameters = original.ManagedDiskParameters +type NetworkInterfaceReference = original.NetworkInterfaceReference +type NetworkInterfaceReferenceProperties = original.NetworkInterfaceReferenceProperties +type NetworkProfile = original.NetworkProfile +type OSDisk = original.OSDisk +type OSDiskImage = original.OSDiskImage +type OSProfile = original.OSProfile +type OperationListResult = original.OperationListResult +type OperationValue = original.OperationValue +type OperationValueDisplay = original.OperationValueDisplay +type OperationsClient = original.OperationsClient +type Plan = original.Plan +type ProximityPlacementGroup = original.ProximityPlacementGroup +type ProximityPlacementGroupListResult = original.ProximityPlacementGroupListResult +type ProximityPlacementGroupListResultIterator = original.ProximityPlacementGroupListResultIterator +type ProximityPlacementGroupListResultPage = original.ProximityPlacementGroupListResultPage +type ProximityPlacementGroupProperties = original.ProximityPlacementGroupProperties +type ProximityPlacementGroupUpdate = original.ProximityPlacementGroupUpdate +type ProximityPlacementGroupsClient = original.ProximityPlacementGroupsClient +type PurchasePlan = original.PurchasePlan +type RecommendedMachineConfiguration = original.RecommendedMachineConfiguration +type RecoveryWalkResponse = original.RecoveryWalkResponse +type RegionalReplicationStatus = original.RegionalReplicationStatus +type ReplicationStatus = original.ReplicationStatus +type RequestRateByIntervalInput = original.RequestRateByIntervalInput +type Resource = original.Resource +type ResourceRange = original.ResourceRange +type ResourceSku = original.ResourceSku +type ResourceSkuCapabilities = original.ResourceSkuCapabilities +type 
ResourceSkuCapacity = original.ResourceSkuCapacity +type ResourceSkuCosts = original.ResourceSkuCosts +type ResourceSkuLocationInfo = original.ResourceSkuLocationInfo +type ResourceSkuRestrictionInfo = original.ResourceSkuRestrictionInfo +type ResourceSkuRestrictions = original.ResourceSkuRestrictions +type ResourceSkuZoneDetails = original.ResourceSkuZoneDetails +type ResourceSkusClient = original.ResourceSkusClient +type ResourceSkusResult = original.ResourceSkusResult +type ResourceSkusResultIterator = original.ResourceSkusResultIterator +type ResourceSkusResultPage = original.ResourceSkusResultPage +type RollbackStatusInfo = original.RollbackStatusInfo +type RollingUpgradePolicy = original.RollingUpgradePolicy +type RollingUpgradeProgressInfo = original.RollingUpgradeProgressInfo +type RollingUpgradeRunningStatus = original.RollingUpgradeRunningStatus +type RollingUpgradeStatusInfo = original.RollingUpgradeStatusInfo +type RollingUpgradeStatusInfoProperties = original.RollingUpgradeStatusInfoProperties +type RunCommandDocument = original.RunCommandDocument +type RunCommandDocumentBase = original.RunCommandDocumentBase +type RunCommandInput = original.RunCommandInput +type RunCommandInputParameter = original.RunCommandInputParameter +type RunCommandListResult = original.RunCommandListResult +type RunCommandListResultIterator = original.RunCommandListResultIterator +type RunCommandListResultPage = original.RunCommandListResultPage +type RunCommandParameterDefinition = original.RunCommandParameterDefinition +type RunCommandResult = original.RunCommandResult +type SSHConfiguration = original.SSHConfiguration +type SSHPublicKey = original.SSHPublicKey +type Sku = original.Sku +type Snapshot = original.Snapshot +type SnapshotList = original.SnapshotList +type SnapshotListIterator = original.SnapshotListIterator +type SnapshotListPage = original.SnapshotListPage +type SnapshotProperties = original.SnapshotProperties +type SnapshotSku = original.SnapshotSku +type 
SnapshotUpdate = original.SnapshotUpdate +type SnapshotUpdateProperties = original.SnapshotUpdateProperties +type SnapshotsClient = original.SnapshotsClient +type SnapshotsCreateOrUpdateFuture = original.SnapshotsCreateOrUpdateFuture +type SnapshotsDeleteFuture = original.SnapshotsDeleteFuture +type SnapshotsGrantAccessFuture = original.SnapshotsGrantAccessFuture +type SnapshotsRevokeAccessFuture = original.SnapshotsRevokeAccessFuture +type SnapshotsUpdateFuture = original.SnapshotsUpdateFuture +type SourceVault = original.SourceVault +type StorageProfile = original.StorageProfile +type SubResource = original.SubResource +type SubResourceReadOnly = original.SubResourceReadOnly +type TargetRegion = original.TargetRegion +type ThrottledRequestsInput = original.ThrottledRequestsInput +type UpdateResource = original.UpdateResource +type UpgradeOperationHistoricalStatusInfo = original.UpgradeOperationHistoricalStatusInfo +type UpgradeOperationHistoricalStatusInfoProperties = original.UpgradeOperationHistoricalStatusInfoProperties +type UpgradeOperationHistoryStatus = original.UpgradeOperationHistoryStatus +type UpgradePolicy = original.UpgradePolicy +type Usage = original.Usage +type UsageClient = original.UsageClient +type UsageName = original.UsageName +type VMScaleSetConvertToSinglePlacementGroupInput = original.VMScaleSetConvertToSinglePlacementGroupInput +type VaultCertificate = original.VaultCertificate +type VaultSecretGroup = original.VaultSecretGroup +type VirtualHardDisk = original.VirtualHardDisk +type VirtualMachine = original.VirtualMachine +type VirtualMachineAgentInstanceView = original.VirtualMachineAgentInstanceView +type VirtualMachineCaptureParameters = original.VirtualMachineCaptureParameters +type VirtualMachineCaptureResult = original.VirtualMachineCaptureResult +type VirtualMachineExtension = original.VirtualMachineExtension +type VirtualMachineExtensionHandlerInstanceView = original.VirtualMachineExtensionHandlerInstanceView +type 
VirtualMachineExtensionImage = original.VirtualMachineExtensionImage +type VirtualMachineExtensionImageProperties = original.VirtualMachineExtensionImageProperties +type VirtualMachineExtensionImagesClient = original.VirtualMachineExtensionImagesClient +type VirtualMachineExtensionInstanceView = original.VirtualMachineExtensionInstanceView +type VirtualMachineExtensionProperties = original.VirtualMachineExtensionProperties +type VirtualMachineExtensionUpdate = original.VirtualMachineExtensionUpdate +type VirtualMachineExtensionUpdateProperties = original.VirtualMachineExtensionUpdateProperties +type VirtualMachineExtensionsClient = original.VirtualMachineExtensionsClient +type VirtualMachineExtensionsCreateOrUpdateFuture = original.VirtualMachineExtensionsCreateOrUpdateFuture +type VirtualMachineExtensionsDeleteFuture = original.VirtualMachineExtensionsDeleteFuture +type VirtualMachineExtensionsListResult = original.VirtualMachineExtensionsListResult +type VirtualMachineExtensionsUpdateFuture = original.VirtualMachineExtensionsUpdateFuture +type VirtualMachineHealthStatus = original.VirtualMachineHealthStatus +type VirtualMachineIdentity = original.VirtualMachineIdentity +type VirtualMachineIdentityUserAssignedIdentitiesValue = original.VirtualMachineIdentityUserAssignedIdentitiesValue +type VirtualMachineImage = original.VirtualMachineImage +type VirtualMachineImageProperties = original.VirtualMachineImageProperties +type VirtualMachineImageResource = original.VirtualMachineImageResource +type VirtualMachineImagesClient = original.VirtualMachineImagesClient +type VirtualMachineInstanceView = original.VirtualMachineInstanceView +type VirtualMachineListResult = original.VirtualMachineListResult +type VirtualMachineListResultIterator = original.VirtualMachineListResultIterator +type VirtualMachineListResultPage = original.VirtualMachineListResultPage +type VirtualMachineProperties = original.VirtualMachineProperties +type VirtualMachineReimageParameters = 
original.VirtualMachineReimageParameters +type VirtualMachineRunCommandsClient = original.VirtualMachineRunCommandsClient +type VirtualMachineScaleSet = original.VirtualMachineScaleSet +type VirtualMachineScaleSetDataDisk = original.VirtualMachineScaleSetDataDisk +type VirtualMachineScaleSetExtension = original.VirtualMachineScaleSetExtension +type VirtualMachineScaleSetExtensionListResult = original.VirtualMachineScaleSetExtensionListResult +type VirtualMachineScaleSetExtensionListResultIterator = original.VirtualMachineScaleSetExtensionListResultIterator +type VirtualMachineScaleSetExtensionListResultPage = original.VirtualMachineScaleSetExtensionListResultPage +type VirtualMachineScaleSetExtensionProfile = original.VirtualMachineScaleSetExtensionProfile +type VirtualMachineScaleSetExtensionProperties = original.VirtualMachineScaleSetExtensionProperties +type VirtualMachineScaleSetExtensionsClient = original.VirtualMachineScaleSetExtensionsClient +type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture = original.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture +type VirtualMachineScaleSetExtensionsDeleteFuture = original.VirtualMachineScaleSetExtensionsDeleteFuture +type VirtualMachineScaleSetIPConfiguration = original.VirtualMachineScaleSetIPConfiguration +type VirtualMachineScaleSetIPConfigurationProperties = original.VirtualMachineScaleSetIPConfigurationProperties +type VirtualMachineScaleSetIPTag = original.VirtualMachineScaleSetIPTag +type VirtualMachineScaleSetIdentity = original.VirtualMachineScaleSetIdentity +type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue = original.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue +type VirtualMachineScaleSetInstanceView = original.VirtualMachineScaleSetInstanceView +type VirtualMachineScaleSetInstanceViewStatusesSummary = original.VirtualMachineScaleSetInstanceViewStatusesSummary +type VirtualMachineScaleSetListOSUpgradeHistory = original.VirtualMachineScaleSetListOSUpgradeHistory +type 
VirtualMachineScaleSetListOSUpgradeHistoryIterator = original.VirtualMachineScaleSetListOSUpgradeHistoryIterator +type VirtualMachineScaleSetListOSUpgradeHistoryPage = original.VirtualMachineScaleSetListOSUpgradeHistoryPage +type VirtualMachineScaleSetListResult = original.VirtualMachineScaleSetListResult +type VirtualMachineScaleSetListResultIterator = original.VirtualMachineScaleSetListResultIterator +type VirtualMachineScaleSetListResultPage = original.VirtualMachineScaleSetListResultPage +type VirtualMachineScaleSetListSkusResult = original.VirtualMachineScaleSetListSkusResult +type VirtualMachineScaleSetListSkusResultIterator = original.VirtualMachineScaleSetListSkusResultIterator +type VirtualMachineScaleSetListSkusResultPage = original.VirtualMachineScaleSetListSkusResultPage +type VirtualMachineScaleSetListWithLinkResult = original.VirtualMachineScaleSetListWithLinkResult +type VirtualMachineScaleSetListWithLinkResultIterator = original.VirtualMachineScaleSetListWithLinkResultIterator +type VirtualMachineScaleSetListWithLinkResultPage = original.VirtualMachineScaleSetListWithLinkResultPage +type VirtualMachineScaleSetManagedDiskParameters = original.VirtualMachineScaleSetManagedDiskParameters +type VirtualMachineScaleSetNetworkConfiguration = original.VirtualMachineScaleSetNetworkConfiguration +type VirtualMachineScaleSetNetworkConfigurationDNSSettings = original.VirtualMachineScaleSetNetworkConfigurationDNSSettings +type VirtualMachineScaleSetNetworkConfigurationProperties = original.VirtualMachineScaleSetNetworkConfigurationProperties +type VirtualMachineScaleSetNetworkProfile = original.VirtualMachineScaleSetNetworkProfile +type VirtualMachineScaleSetOSDisk = original.VirtualMachineScaleSetOSDisk +type VirtualMachineScaleSetOSProfile = original.VirtualMachineScaleSetOSProfile +type VirtualMachineScaleSetProperties = original.VirtualMachineScaleSetProperties +type VirtualMachineScaleSetPublicIPAddressConfiguration = 
original.VirtualMachineScaleSetPublicIPAddressConfiguration +type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings = original.VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings +type VirtualMachineScaleSetPublicIPAddressConfigurationProperties = original.VirtualMachineScaleSetPublicIPAddressConfigurationProperties +type VirtualMachineScaleSetReimageParameters = original.VirtualMachineScaleSetReimageParameters +type VirtualMachineScaleSetRollingUpgradesCancelFuture = original.VirtualMachineScaleSetRollingUpgradesCancelFuture +type VirtualMachineScaleSetRollingUpgradesClient = original.VirtualMachineScaleSetRollingUpgradesClient +type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture = original.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture +type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture = original.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture +type VirtualMachineScaleSetSku = original.VirtualMachineScaleSetSku +type VirtualMachineScaleSetSkuCapacity = original.VirtualMachineScaleSetSkuCapacity +type VirtualMachineScaleSetStorageProfile = original.VirtualMachineScaleSetStorageProfile +type VirtualMachineScaleSetUpdate = original.VirtualMachineScaleSetUpdate +type VirtualMachineScaleSetUpdateIPConfiguration = original.VirtualMachineScaleSetUpdateIPConfiguration +type VirtualMachineScaleSetUpdateIPConfigurationProperties = original.VirtualMachineScaleSetUpdateIPConfigurationProperties +type VirtualMachineScaleSetUpdateNetworkConfiguration = original.VirtualMachineScaleSetUpdateNetworkConfiguration +type VirtualMachineScaleSetUpdateNetworkConfigurationProperties = original.VirtualMachineScaleSetUpdateNetworkConfigurationProperties +type VirtualMachineScaleSetUpdateNetworkProfile = original.VirtualMachineScaleSetUpdateNetworkProfile +type VirtualMachineScaleSetUpdateOSDisk = original.VirtualMachineScaleSetUpdateOSDisk +type VirtualMachineScaleSetUpdateOSProfile = 
original.VirtualMachineScaleSetUpdateOSProfile +type VirtualMachineScaleSetUpdateProperties = original.VirtualMachineScaleSetUpdateProperties +type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration = original.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration +type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties = original.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties +type VirtualMachineScaleSetUpdateStorageProfile = original.VirtualMachineScaleSetUpdateStorageProfile +type VirtualMachineScaleSetUpdateVMProfile = original.VirtualMachineScaleSetUpdateVMProfile +type VirtualMachineScaleSetVM = original.VirtualMachineScaleSetVM +type VirtualMachineScaleSetVMExtensionsSummary = original.VirtualMachineScaleSetVMExtensionsSummary +type VirtualMachineScaleSetVMInstanceIDs = original.VirtualMachineScaleSetVMInstanceIDs +type VirtualMachineScaleSetVMInstanceRequiredIDs = original.VirtualMachineScaleSetVMInstanceRequiredIDs +type VirtualMachineScaleSetVMInstanceView = original.VirtualMachineScaleSetVMInstanceView +type VirtualMachineScaleSetVMListResult = original.VirtualMachineScaleSetVMListResult +type VirtualMachineScaleSetVMListResultIterator = original.VirtualMachineScaleSetVMListResultIterator +type VirtualMachineScaleSetVMListResultPage = original.VirtualMachineScaleSetVMListResultPage +type VirtualMachineScaleSetVMNetworkProfileConfiguration = original.VirtualMachineScaleSetVMNetworkProfileConfiguration +type VirtualMachineScaleSetVMProfile = original.VirtualMachineScaleSetVMProfile +type VirtualMachineScaleSetVMProperties = original.VirtualMachineScaleSetVMProperties +type VirtualMachineScaleSetVMProtectionPolicy = original.VirtualMachineScaleSetVMProtectionPolicy +type VirtualMachineScaleSetVMReimageParameters = original.VirtualMachineScaleSetVMReimageParameters +type VirtualMachineScaleSetVMsClient = original.VirtualMachineScaleSetVMsClient +type VirtualMachineScaleSetVMsDeallocateFuture = 
original.VirtualMachineScaleSetVMsDeallocateFuture +type VirtualMachineScaleSetVMsDeleteFuture = original.VirtualMachineScaleSetVMsDeleteFuture +type VirtualMachineScaleSetVMsPerformMaintenanceFuture = original.VirtualMachineScaleSetVMsPerformMaintenanceFuture +type VirtualMachineScaleSetVMsPowerOffFuture = original.VirtualMachineScaleSetVMsPowerOffFuture +type VirtualMachineScaleSetVMsRedeployFuture = original.VirtualMachineScaleSetVMsRedeployFuture +type VirtualMachineScaleSetVMsReimageAllFuture = original.VirtualMachineScaleSetVMsReimageAllFuture +type VirtualMachineScaleSetVMsReimageFuture = original.VirtualMachineScaleSetVMsReimageFuture +type VirtualMachineScaleSetVMsRestartFuture = original.VirtualMachineScaleSetVMsRestartFuture +type VirtualMachineScaleSetVMsRunCommandFuture = original.VirtualMachineScaleSetVMsRunCommandFuture +type VirtualMachineScaleSetVMsStartFuture = original.VirtualMachineScaleSetVMsStartFuture +type VirtualMachineScaleSetVMsUpdateFuture = original.VirtualMachineScaleSetVMsUpdateFuture +type VirtualMachineScaleSetsClient = original.VirtualMachineScaleSetsClient +type VirtualMachineScaleSetsCreateOrUpdateFuture = original.VirtualMachineScaleSetsCreateOrUpdateFuture +type VirtualMachineScaleSetsDeallocateFuture = original.VirtualMachineScaleSetsDeallocateFuture +type VirtualMachineScaleSetsDeleteFuture = original.VirtualMachineScaleSetsDeleteFuture +type VirtualMachineScaleSetsDeleteInstancesFuture = original.VirtualMachineScaleSetsDeleteInstancesFuture +type VirtualMachineScaleSetsPerformMaintenanceFuture = original.VirtualMachineScaleSetsPerformMaintenanceFuture +type VirtualMachineScaleSetsPowerOffFuture = original.VirtualMachineScaleSetsPowerOffFuture +type VirtualMachineScaleSetsRedeployFuture = original.VirtualMachineScaleSetsRedeployFuture +type VirtualMachineScaleSetsReimageAllFuture = original.VirtualMachineScaleSetsReimageAllFuture +type VirtualMachineScaleSetsReimageFuture = original.VirtualMachineScaleSetsReimageFuture +type 
VirtualMachineScaleSetsRestartFuture = original.VirtualMachineScaleSetsRestartFuture +type VirtualMachineScaleSetsStartFuture = original.VirtualMachineScaleSetsStartFuture +type VirtualMachineScaleSetsUpdateFuture = original.VirtualMachineScaleSetsUpdateFuture +type VirtualMachineScaleSetsUpdateInstancesFuture = original.VirtualMachineScaleSetsUpdateInstancesFuture +type VirtualMachineSize = original.VirtualMachineSize +type VirtualMachineSizeListResult = original.VirtualMachineSizeListResult +type VirtualMachineSizesClient = original.VirtualMachineSizesClient +type VirtualMachineStatusCodeCount = original.VirtualMachineStatusCodeCount +type VirtualMachineUpdate = original.VirtualMachineUpdate +type VirtualMachinesCaptureFuture = original.VirtualMachinesCaptureFuture +type VirtualMachinesClient = original.VirtualMachinesClient +type VirtualMachinesConvertToManagedDisksFuture = original.VirtualMachinesConvertToManagedDisksFuture +type VirtualMachinesCreateOrUpdateFuture = original.VirtualMachinesCreateOrUpdateFuture +type VirtualMachinesDeallocateFuture = original.VirtualMachinesDeallocateFuture +type VirtualMachinesDeleteFuture = original.VirtualMachinesDeleteFuture +type VirtualMachinesPerformMaintenanceFuture = original.VirtualMachinesPerformMaintenanceFuture +type VirtualMachinesPowerOffFuture = original.VirtualMachinesPowerOffFuture +type VirtualMachinesRedeployFuture = original.VirtualMachinesRedeployFuture +type VirtualMachinesReimageFuture = original.VirtualMachinesReimageFuture +type VirtualMachinesRestartFuture = original.VirtualMachinesRestartFuture +type VirtualMachinesRunCommandFuture = original.VirtualMachinesRunCommandFuture +type VirtualMachinesStartFuture = original.VirtualMachinesStartFuture +type VirtualMachinesUpdateFuture = original.VirtualMachinesUpdateFuture +type WinRMConfiguration = original.WinRMConfiguration +type WinRMListener = original.WinRMListener +type WindowsConfiguration = original.WindowsConfiguration + +func New(subscriptionID 
string) BaseClient { + return original.New(subscriptionID) +} +func NewAvailabilitySetListResultIterator(page AvailabilitySetListResultPage) AvailabilitySetListResultIterator { + return original.NewAvailabilitySetListResultIterator(page) +} +func NewAvailabilitySetListResultPage(getNextPage func(context.Context, AvailabilitySetListResult) (AvailabilitySetListResult, error)) AvailabilitySetListResultPage { + return original.NewAvailabilitySetListResultPage(getNextPage) +} +func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { + return original.NewAvailabilitySetsClient(subscriptionID) +} +func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { + return original.NewAvailabilitySetsClientWithBaseURI(baseURI, subscriptionID) +} +func NewContainerServiceListResultIterator(page ContainerServiceListResultPage) ContainerServiceListResultIterator { + return original.NewContainerServiceListResultIterator(page) +} +func NewContainerServiceListResultPage(getNextPage func(context.Context, ContainerServiceListResult) (ContainerServiceListResult, error)) ContainerServiceListResultPage { + return original.NewContainerServiceListResultPage(getNextPage) +} +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return original.NewContainerServicesClient(subscriptionID) +} +func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return original.NewContainerServicesClientWithBaseURI(baseURI, subscriptionID) +} +func NewDiskListIterator(page DiskListPage) DiskListIterator { + return original.NewDiskListIterator(page) +} +func NewDiskListPage(getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { + return original.NewDiskListPage(getNextPage) +} +func NewDisksClient(subscriptionID string) DisksClient { + return original.NewDisksClient(subscriptionID) +} +func NewDisksClientWithBaseURI(baseURI string, 
subscriptionID string) DisksClient { + return original.NewDisksClientWithBaseURI(baseURI, subscriptionID) +} +func NewGalleriesClient(subscriptionID string) GalleriesClient { + return original.NewGalleriesClient(subscriptionID) +} +func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient { + return original.NewGalleriesClientWithBaseURI(baseURI, subscriptionID) +} +func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator { + return original.NewGalleryImageListIterator(page) +} +func NewGalleryImageListPage(getNextPage func(context.Context, GalleryImageList) (GalleryImageList, error)) GalleryImageListPage { + return original.NewGalleryImageListPage(getNextPage) +} +func NewGalleryImageVersionListIterator(page GalleryImageVersionListPage) GalleryImageVersionListIterator { + return original.NewGalleryImageVersionListIterator(page) +} +func NewGalleryImageVersionListPage(getNextPage func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error)) GalleryImageVersionListPage { + return original.NewGalleryImageVersionListPage(getNextPage) +} +func NewGalleryImageVersionsClient(subscriptionID string) GalleryImageVersionsClient { + return original.NewGalleryImageVersionsClient(subscriptionID) +} +func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageVersionsClient { + return original.NewGalleryImageVersionsClientWithBaseURI(baseURI, subscriptionID) +} +func NewGalleryImagesClient(subscriptionID string) GalleryImagesClient { + return original.NewGalleryImagesClient(subscriptionID) +} +func NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) GalleryImagesClient { + return original.NewGalleryImagesClientWithBaseURI(baseURI, subscriptionID) +} +func NewGalleryListIterator(page GalleryListPage) GalleryListIterator { + return original.NewGalleryListIterator(page) +} +func NewGalleryListPage(getNextPage func(context.Context, GalleryList) 
(GalleryList, error)) GalleryListPage { + return original.NewGalleryListPage(getNextPage) +} +func NewImageListResultIterator(page ImageListResultPage) ImageListResultIterator { + return original.NewImageListResultIterator(page) +} +func NewImageListResultPage(getNextPage func(context.Context, ImageListResult) (ImageListResult, error)) ImageListResultPage { + return original.NewImageListResultPage(getNextPage) +} +func NewImagesClient(subscriptionID string) ImagesClient { + return original.NewImagesClient(subscriptionID) +} +func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesClient { + return original.NewImagesClientWithBaseURI(baseURI, subscriptionID) +} +func NewListUsagesResultIterator(page ListUsagesResultPage) ListUsagesResultIterator { + return original.NewListUsagesResultIterator(page) +} +func NewListUsagesResultPage(getNextPage func(context.Context, ListUsagesResult) (ListUsagesResult, error)) ListUsagesResultPage { + return original.NewListUsagesResultPage(getNextPage) +} +func NewLogAnalyticsClient(subscriptionID string) LogAnalyticsClient { + return original.NewLogAnalyticsClient(subscriptionID) +} +func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) LogAnalyticsClient { + return original.NewLogAnalyticsClientWithBaseURI(baseURI, subscriptionID) +} +func NewOperationsClient(subscriptionID string) OperationsClient { + return original.NewOperationsClient(subscriptionID) +} +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID) +} +func NewProximityPlacementGroupListResultIterator(page ProximityPlacementGroupListResultPage) ProximityPlacementGroupListResultIterator { + return original.NewProximityPlacementGroupListResultIterator(page) +} +func NewProximityPlacementGroupListResultPage(getNextPage func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error)) 
ProximityPlacementGroupListResultPage { + return original.NewProximityPlacementGroupListResultPage(getNextPage) +} +func NewProximityPlacementGroupsClient(subscriptionID string) ProximityPlacementGroupsClient { + return original.NewProximityPlacementGroupsClient(subscriptionID) +} +func NewProximityPlacementGroupsClientWithBaseURI(baseURI string, subscriptionID string) ProximityPlacementGroupsClient { + return original.NewProximityPlacementGroupsClientWithBaseURI(baseURI, subscriptionID) +} +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return original.NewResourceSkusClient(subscriptionID) +} +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return original.NewResourceSkusClientWithBaseURI(baseURI, subscriptionID) +} +func NewResourceSkusResultIterator(page ResourceSkusResultPage) ResourceSkusResultIterator { + return original.NewResourceSkusResultIterator(page) +} +func NewResourceSkusResultPage(getNextPage func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)) ResourceSkusResultPage { + return original.NewResourceSkusResultPage(getNextPage) +} +func NewRunCommandListResultIterator(page RunCommandListResultPage) RunCommandListResultIterator { + return original.NewRunCommandListResultIterator(page) +} +func NewRunCommandListResultPage(getNextPage func(context.Context, RunCommandListResult) (RunCommandListResult, error)) RunCommandListResultPage { + return original.NewRunCommandListResultPage(getNextPage) +} +func NewSnapshotListIterator(page SnapshotListPage) SnapshotListIterator { + return original.NewSnapshotListIterator(page) +} +func NewSnapshotListPage(getNextPage func(context.Context, SnapshotList) (SnapshotList, error)) SnapshotListPage { + return original.NewSnapshotListPage(getNextPage) +} +func NewSnapshotsClient(subscriptionID string) SnapshotsClient { + return original.NewSnapshotsClient(subscriptionID) +} +func NewSnapshotsClientWithBaseURI(baseURI string, 
subscriptionID string) SnapshotsClient { + return original.NewSnapshotsClientWithBaseURI(baseURI, subscriptionID) +} +func NewUsageClient(subscriptionID string) UsageClient { + return original.NewUsageClient(subscriptionID) +} +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return original.NewUsageClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { + return original.NewVirtualMachineExtensionImagesClient(subscriptionID) +} +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return original.NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient { + return original.NewVirtualMachineExtensionsClient(subscriptionID) +} +func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { + return original.NewVirtualMachineExtensionsClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { + return original.NewVirtualMachineImagesClient(subscriptionID) +} +func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return original.NewVirtualMachineImagesClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineListResultIterator(page VirtualMachineListResultPage) VirtualMachineListResultIterator { + return original.NewVirtualMachineListResultIterator(page) +} +func NewVirtualMachineListResultPage(getNextPage func(context.Context, VirtualMachineListResult) (VirtualMachineListResult, error)) VirtualMachineListResultPage { + return original.NewVirtualMachineListResultPage(getNextPage) +} +func NewVirtualMachineRunCommandsClient(subscriptionID 
string) VirtualMachineRunCommandsClient { + return original.NewVirtualMachineRunCommandsClient(subscriptionID) +} +func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineRunCommandsClient { + return original.NewVirtualMachineRunCommandsClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineScaleSetExtensionListResultIterator(page VirtualMachineScaleSetExtensionListResultPage) VirtualMachineScaleSetExtensionListResultIterator { + return original.NewVirtualMachineScaleSetExtensionListResultIterator(page) +} +func NewVirtualMachineScaleSetExtensionListResultPage(getNextPage func(context.Context, VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error)) VirtualMachineScaleSetExtensionListResultPage { + return original.NewVirtualMachineScaleSetExtensionListResultPage(getNextPage) +} +func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return original.NewVirtualMachineScaleSetExtensionsClient(subscriptionID) +} +func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return original.NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineScaleSetListOSUpgradeHistoryIterator(page VirtualMachineScaleSetListOSUpgradeHistoryPage) VirtualMachineScaleSetListOSUpgradeHistoryIterator { + return original.NewVirtualMachineScaleSetListOSUpgradeHistoryIterator(page) +} +func NewVirtualMachineScaleSetListOSUpgradeHistoryPage(getNextPage func(context.Context, VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error)) VirtualMachineScaleSetListOSUpgradeHistoryPage { + return original.NewVirtualMachineScaleSetListOSUpgradeHistoryPage(getNextPage) +} +func NewVirtualMachineScaleSetListResultIterator(page VirtualMachineScaleSetListResultPage) 
VirtualMachineScaleSetListResultIterator { + return original.NewVirtualMachineScaleSetListResultIterator(page) +} +func NewVirtualMachineScaleSetListResultPage(getNextPage func(context.Context, VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error)) VirtualMachineScaleSetListResultPage { + return original.NewVirtualMachineScaleSetListResultPage(getNextPage) +} +func NewVirtualMachineScaleSetListSkusResultIterator(page VirtualMachineScaleSetListSkusResultPage) VirtualMachineScaleSetListSkusResultIterator { + return original.NewVirtualMachineScaleSetListSkusResultIterator(page) +} +func NewVirtualMachineScaleSetListSkusResultPage(getNextPage func(context.Context, VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error)) VirtualMachineScaleSetListSkusResultPage { + return original.NewVirtualMachineScaleSetListSkusResultPage(getNextPage) +} +func NewVirtualMachineScaleSetListWithLinkResultIterator(page VirtualMachineScaleSetListWithLinkResultPage) VirtualMachineScaleSetListWithLinkResultIterator { + return original.NewVirtualMachineScaleSetListWithLinkResultIterator(page) +} +func NewVirtualMachineScaleSetListWithLinkResultPage(getNextPage func(context.Context, VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error)) VirtualMachineScaleSetListWithLinkResultPage { + return original.NewVirtualMachineScaleSetListWithLinkResultPage(getNextPage) +} +func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return original.NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID) +} +func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return original.NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineScaleSetVMListResultIterator(page 
VirtualMachineScaleSetVMListResultPage) VirtualMachineScaleSetVMListResultIterator { + return original.NewVirtualMachineScaleSetVMListResultIterator(page) +} +func NewVirtualMachineScaleSetVMListResultPage(getNextPage func(context.Context, VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error)) VirtualMachineScaleSetVMListResultPage { + return original.NewVirtualMachineScaleSetVMListResultPage(getNextPage) +} +func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { + return original.NewVirtualMachineScaleSetVMsClient(subscriptionID) +} +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { + return original.NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient { + return original.NewVirtualMachineScaleSetsClient(subscriptionID) +} +func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { + return original.NewVirtualMachineScaleSetsClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { + return original.NewVirtualMachineSizesClient(subscriptionID) +} +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return original.NewVirtualMachineSizesClientWithBaseURI(baseURI, subscriptionID) +} +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { + return original.NewVirtualMachinesClient(subscriptionID) +} +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return original.NewVirtualMachinesClientWithBaseURI(baseURI, subscriptionID) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, 
subscriptionID) +} +func PossibleAccessLevelValues() []AccessLevel { + return original.PossibleAccessLevelValues() +} +func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState { + return original.PossibleAggregatedReplicationStateValues() +} +func PossibleAvailabilitySetSkuTypesValues() []AvailabilitySetSkuTypes { + return original.PossibleAvailabilitySetSkuTypesValues() +} +func PossibleCachingTypesValues() []CachingTypes { + return original.PossibleCachingTypesValues() +} +func PossibleComponentNamesValues() []ComponentNames { + return original.PossibleComponentNamesValues() +} +func PossibleContainerServiceOrchestratorTypesValues() []ContainerServiceOrchestratorTypes { + return original.PossibleContainerServiceOrchestratorTypesValues() +} +func PossibleContainerServiceVMSizeTypesValues() []ContainerServiceVMSizeTypes { + return original.PossibleContainerServiceVMSizeTypesValues() +} +func PossibleDiffDiskOptionsValues() []DiffDiskOptions { + return original.PossibleDiffDiskOptionsValues() +} +func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { + return original.PossibleDiskCreateOptionTypesValues() +} +func PossibleDiskCreateOptionValues() []DiskCreateOption { + return original.PossibleDiskCreateOptionValues() +} +func PossibleDiskStateValues() []DiskState { + return original.PossibleDiskStateValues() +} +func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes { + return original.PossibleDiskStorageAccountTypesValues() +} +func PossibleHostCachingValues() []HostCaching { + return original.PossibleHostCachingValues() +} +func PossibleHyperVGenerationTypesValues() []HyperVGenerationTypes { + return original.PossibleHyperVGenerationTypesValues() +} +func PossibleHyperVGenerationValues() []HyperVGeneration { + return original.PossibleHyperVGenerationValues() +} +func PossibleIPVersionValues() []IPVersion { + return original.PossibleIPVersionValues() +} +func PossibleInstanceViewTypesValues() []InstanceViewTypes { + 
return original.PossibleInstanceViewTypesValues() +} +func PossibleIntervalInMinsValues() []IntervalInMins { + return original.PossibleIntervalInMinsValues() +} +func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationResultCodeTypes { + return original.PossibleMaintenanceOperationResultCodeTypesValues() +} +func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes { + return original.PossibleOperatingSystemStateTypesValues() +} +func PossibleOperatingSystemTypesValues() []OperatingSystemTypes { + return original.PossibleOperatingSystemTypesValues() +} +func PossiblePassNamesValues() []PassNames { + return original.PossiblePassNamesValues() +} +func PossibleProtocolTypesValues() []ProtocolTypes { + return original.PossibleProtocolTypesValues() +} +func PossibleProvisioningState1Values() []ProvisioningState1 { + return original.PossibleProvisioningState1Values() +} +func PossibleProvisioningState2Values() []ProvisioningState2 { + return original.PossibleProvisioningState2Values() +} +func PossibleProvisioningStateValues() []ProvisioningState { + return original.PossibleProvisioningStateValues() +} +func PossibleProximityPlacementGroupTypeValues() []ProximityPlacementGroupType { + return original.PossibleProximityPlacementGroupTypeValues() +} +func PossibleReplicationStateValues() []ReplicationState { + return original.PossibleReplicationStateValues() +} +func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { + return original.PossibleReplicationStatusTypesValues() +} +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return original.PossibleResourceIdentityTypeValues() +} +func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType { + return original.PossibleResourceSkuCapacityScaleTypeValues() +} +func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode { + return original.PossibleResourceSkuRestrictionsReasonCodeValues() +} +func 
PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { + return original.PossibleResourceSkuRestrictionsTypeValues() +} +func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType { + return original.PossibleRollingUpgradeActionTypeValues() +} +func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { + return original.PossibleRollingUpgradeStatusCodeValues() +} +func PossibleSettingNamesValues() []SettingNames { + return original.PossibleSettingNamesValues() +} +func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes { + return original.PossibleSnapshotStorageAccountTypesValues() +} +func PossibleStatusLevelTypesValues() []StatusLevelTypes { + return original.PossibleStatusLevelTypesValues() +} +func PossibleStorageAccountTypeValues() []StorageAccountType { + return original.PossibleStorageAccountTypeValues() +} +func PossibleStorageAccountTypesValues() []StorageAccountTypes { + return original.PossibleStorageAccountTypesValues() +} +func PossibleUpgradeModeValues() []UpgradeMode { + return original.PossibleUpgradeModeValues() +} +func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker { + return original.PossibleUpgradeOperationInvokerValues() +} +func PossibleUpgradeStateValues() []UpgradeState { + return original.PossibleUpgradeStateValues() +} +func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionPolicyTypes { + return original.PossibleVirtualMachineEvictionPolicyTypesValues() +} +func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { + return original.PossibleVirtualMachinePriorityTypesValues() +} +func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSetSkuScaleType { + return original.PossibleVirtualMachineScaleSetSkuScaleTypeValues() +} +func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { + return original.PossibleVirtualMachineSizeTypesValues() +} +func UserAgent() string 
{ + return original.UserAgent() + " profiles/latest" +} +func Version() string { + return original.Version() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go new file mode 100644 index 000000000..668736597 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go @@ -0,0 +1,299 @@ +package computeapi + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" + "github.com/Azure/go-autorest/autorest" +) + +// OperationsClientAPI contains the set of methods on the OperationsClient type. +type OperationsClientAPI interface { + List(ctx context.Context) (result compute.OperationListResult, err error) +} + +var _ OperationsClientAPI = (*compute.OperationsClient)(nil) + +// AvailabilitySetsClientAPI contains the set of methods on the AvailabilitySetsClient type. 
+type AvailabilitySetsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters compute.AvailabilitySet) (result compute.AvailabilitySet, err error) + Delete(ctx context.Context, resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) + Get(ctx context.Context, resourceGroupName string, availabilitySetName string) (result compute.AvailabilitySet, err error) + List(ctx context.Context, resourceGroupName string) (result compute.AvailabilitySetListResultPage, err error) + ListAvailableSizes(ctx context.Context, resourceGroupName string, availabilitySetName string) (result compute.VirtualMachineSizeListResult, err error) + ListBySubscription(ctx context.Context) (result compute.AvailabilitySetListResultPage, err error) + Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters compute.AvailabilitySetUpdate) (result compute.AvailabilitySet, err error) +} + +var _ AvailabilitySetsClientAPI = (*compute.AvailabilitySetsClient)(nil) + +// ProximityPlacementGroupsClientAPI contains the set of methods on the ProximityPlacementGroupsClient type. 
+type ProximityPlacementGroupsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters compute.ProximityPlacementGroup) (result compute.ProximityPlacementGroup, err error) + Delete(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result autorest.Response, err error) + Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result compute.ProximityPlacementGroup, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ProximityPlacementGroupListResultPage, err error) + ListBySubscription(ctx context.Context) (result compute.ProximityPlacementGroupListResultPage, err error) + Update(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters compute.ProximityPlacementGroupUpdate) (result compute.ProximityPlacementGroup, err error) +} + +var _ ProximityPlacementGroupsClientAPI = (*compute.ProximityPlacementGroupsClient)(nil) + +// VirtualMachineExtensionImagesClientAPI contains the set of methods on the VirtualMachineExtensionImagesClient type. +type VirtualMachineExtensionImagesClientAPI interface { + Get(ctx context.Context, location string, publisherName string, typeParameter string, version string) (result compute.VirtualMachineExtensionImage, err error) + ListTypes(ctx context.Context, location string, publisherName string) (result compute.ListVirtualMachineExtensionImage, err error) + ListVersions(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result compute.ListVirtualMachineExtensionImage, err error) +} + +var _ VirtualMachineExtensionImagesClientAPI = (*compute.VirtualMachineExtensionImagesClient)(nil) + +// VirtualMachineExtensionsClientAPI contains the set of methods on the VirtualMachineExtensionsClient type. 
+type VirtualMachineExtensionsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters compute.VirtualMachineExtension) (result compute.VirtualMachineExtensionsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (result compute.VirtualMachineExtensionsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (result compute.VirtualMachineExtension, err error) + List(ctx context.Context, resourceGroupName string, VMName string, expand string) (result compute.VirtualMachineExtensionsListResult, err error) + Update(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters compute.VirtualMachineExtensionUpdate) (result compute.VirtualMachineExtensionsUpdateFuture, err error) +} + +var _ VirtualMachineExtensionsClientAPI = (*compute.VirtualMachineExtensionsClient)(nil) + +// VirtualMachineImagesClientAPI contains the set of methods on the VirtualMachineImagesClient type. 
+type VirtualMachineImagesClientAPI interface { + Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (result compute.VirtualMachineImage, err error) + List(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result compute.ListVirtualMachineImageResource, err error) + ListOffers(ctx context.Context, location string, publisherName string) (result compute.ListVirtualMachineImageResource, err error) + ListPublishers(ctx context.Context, location string) (result compute.ListVirtualMachineImageResource, err error) + ListSkus(ctx context.Context, location string, publisherName string, offer string) (result compute.ListVirtualMachineImageResource, err error) +} + +var _ VirtualMachineImagesClientAPI = (*compute.VirtualMachineImagesClient)(nil) + +// UsageClientAPI contains the set of methods on the UsageClient type. +type UsageClientAPI interface { + List(ctx context.Context, location string) (result compute.ListUsagesResultPage, err error) +} + +var _ UsageClientAPI = (*compute.UsageClient)(nil) + +// VirtualMachinesClientAPI contains the set of methods on the VirtualMachinesClient type. 
+type VirtualMachinesClientAPI interface { + Capture(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineCaptureParameters) (result compute.VirtualMachinesCaptureFuture, err error) + ConvertToManagedDisks(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesConvertToManagedDisksFuture, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (result compute.VirtualMachinesCreateOrUpdateFuture, err error) + Deallocate(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesDeallocateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesDeleteFuture, err error) + Generalize(ctx context.Context, resourceGroupName string, VMName string) (result autorest.Response, err error) + Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) + InstanceView(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachineInstanceView, err error) + List(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultPage, err error) + ListAll(ctx context.Context) (result compute.VirtualMachineListResultPage, err error) + ListAvailableSizes(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachineSizeListResult, err error) + ListByLocation(ctx context.Context, location string) (result compute.VirtualMachineListResultPage, err error) + PerformMaintenance(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesPerformMaintenanceFuture, err error) + PowerOff(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (result compute.VirtualMachinesPowerOffFuture, err error) + Redeploy(ctx context.Context, 
resourceGroupName string, VMName string) (result compute.VirtualMachinesRedeployFuture, err error) + Reimage(ctx context.Context, resourceGroupName string, VMName string, parameters *compute.VirtualMachineReimageParameters) (result compute.VirtualMachinesReimageFuture, err error) + Restart(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesRestartFuture, err error) + RunCommand(ctx context.Context, resourceGroupName string, VMName string, parameters compute.RunCommandInput) (result compute.VirtualMachinesRunCommandFuture, err error) + Start(ctx context.Context, resourceGroupName string, VMName string) (result compute.VirtualMachinesStartFuture, err error) + Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate) (result compute.VirtualMachinesUpdateFuture, err error) +} + +var _ VirtualMachinesClientAPI = (*compute.VirtualMachinesClient)(nil) + +// VirtualMachineSizesClientAPI contains the set of methods on the VirtualMachineSizesClient type. +type VirtualMachineSizesClientAPI interface { + List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error) +} + +var _ VirtualMachineSizesClientAPI = (*compute.VirtualMachineSizesClient)(nil) + +// ImagesClientAPI contains the set of methods on the ImagesClient type. 
+type ImagesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters compute.Image) (result compute.ImagesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, imageName string) (result compute.ImagesDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, imageName string, expand string) (result compute.Image, err error) + List(ctx context.Context) (result compute.ImageListResultPage, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ImageListResultPage, err error) + Update(ctx context.Context, resourceGroupName string, imageName string, parameters compute.ImageUpdate) (result compute.ImagesUpdateFuture, err error) +} + +var _ ImagesClientAPI = (*compute.ImagesClient)(nil) + +// VirtualMachineScaleSetsClientAPI contains the set of methods on the VirtualMachineScaleSetsClient type. +type VirtualMachineScaleSetsClientAPI interface { + ConvertToSinglePlacementGroup(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VMScaleSetConvertToSinglePlacementGroupInput) (result autorest.Response, err error) + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (result compute.VirtualMachineScaleSetsCreateOrUpdateFuture, err error) + Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsDeallocateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetsDeleteFuture, err error) + DeleteInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (result compute.VirtualMachineScaleSetsDeleteInstancesFuture, err error) + 
ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result compute.RecoveryWalkResponse, err error) + Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) + GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetInstanceView, err error) + GetOSUpgradeHistory(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetListOSUpgradeHistoryPage, err error) + List(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineScaleSetListResultPage, err error) + ListAll(ctx context.Context) (result compute.VirtualMachineScaleSetListWithLinkResultPage, err error) + ListSkus(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetListSkusResultPage, err error) + PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsPerformMaintenanceFuture, err error) + PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (result compute.VirtualMachineScaleSetsPowerOffFuture, err error) + Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsRedeployFuture, err error) + Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *compute.VirtualMachineScaleSetReimageParameters) (result compute.VirtualMachineScaleSetsReimageFuture, err error) + ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs 
*compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsReimageAllFuture, err error) + Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsRestartFuture, err error) + Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *compute.VirtualMachineScaleSetVMInstanceIDs) (result compute.VirtualMachineScaleSetsStartFuture, err error) + Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSetUpdate) (result compute.VirtualMachineScaleSetsUpdateFuture, err error) + UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (result compute.VirtualMachineScaleSetsUpdateInstancesFuture, err error) +} + +var _ VirtualMachineScaleSetsClientAPI = (*compute.VirtualMachineScaleSetsClient)(nil) + +// VirtualMachineScaleSetExtensionsClientAPI contains the set of methods on the VirtualMachineScaleSetExtensionsClient type. 
+type VirtualMachineScaleSetExtensionsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters compute.VirtualMachineScaleSetExtension) (result compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (result compute.VirtualMachineScaleSetExtensionsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result compute.VirtualMachineScaleSetExtension, err error) + List(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetExtensionListResultPage, err error) +} + +var _ VirtualMachineScaleSetExtensionsClientAPI = (*compute.VirtualMachineScaleSetExtensionsClient)(nil) + +// VirtualMachineScaleSetRollingUpgradesClientAPI contains the set of methods on the VirtualMachineScaleSetRollingUpgradesClient type. 
+type VirtualMachineScaleSetRollingUpgradesClientAPI interface { + Cancel(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) + GetLatest(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.RollingUpgradeStatusInfo, err error) + StartExtensionUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) + StartOSUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) +} + +var _ VirtualMachineScaleSetRollingUpgradesClientAPI = (*compute.VirtualMachineScaleSetRollingUpgradesClient)(nil) + +// VirtualMachineScaleSetVMsClientAPI contains the set of methods on the VirtualMachineScaleSetVMsClient type. +type VirtualMachineScaleSetVMsClientAPI interface { + Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeallocateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) + GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) + List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResultPage, err error) + PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result 
compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) + PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (result compute.VirtualMachineScaleSetVMsPowerOffFuture, err error) + Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsRedeployFuture, err error) + Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *compute.VirtualMachineScaleSetVMReimageParameters) (result compute.VirtualMachineScaleSetVMsReimageFuture, err error) + ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsReimageAllFuture, err error) + Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsRestartFuture, err error) + RunCommand(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.RunCommandInput) (result compute.VirtualMachineScaleSetVMsRunCommandFuture, err error) + Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsStartFuture, err error) + Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (result compute.VirtualMachineScaleSetVMsUpdateFuture, err error) +} + +var _ VirtualMachineScaleSetVMsClientAPI = (*compute.VirtualMachineScaleSetVMsClient)(nil) + +// LogAnalyticsClientAPI contains the set of methods on the LogAnalyticsClient type. 
+type LogAnalyticsClientAPI interface { + ExportRequestRateByInterval(ctx context.Context, parameters compute.RequestRateByIntervalInput, location string) (result compute.LogAnalyticsExportRequestRateByIntervalFuture, err error) + ExportThrottledRequests(ctx context.Context, parameters compute.ThrottledRequestsInput, location string) (result compute.LogAnalyticsExportThrottledRequestsFuture, err error) +} + +var _ LogAnalyticsClientAPI = (*compute.LogAnalyticsClient)(nil) + +// VirtualMachineRunCommandsClientAPI contains the set of methods on the VirtualMachineRunCommandsClient type. +type VirtualMachineRunCommandsClientAPI interface { + Get(ctx context.Context, location string, commandID string) (result compute.RunCommandDocument, err error) + List(ctx context.Context, location string) (result compute.RunCommandListResultPage, err error) +} + +var _ VirtualMachineRunCommandsClientAPI = (*compute.VirtualMachineRunCommandsClient)(nil) + +// ResourceSkusClientAPI contains the set of methods on the ResourceSkusClient type. +type ResourceSkusClientAPI interface { + List(ctx context.Context) (result compute.ResourceSkusResultPage, err error) +} + +var _ ResourceSkusClientAPI = (*compute.ResourceSkusClient)(nil) + +// DisksClientAPI contains the set of methods on the DisksClient type. 
+type DisksClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk compute.Disk) (result compute.DisksCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) + GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData compute.GrantAccessData) (result compute.DisksGrantAccessFuture, err error) + List(ctx context.Context) (result compute.DiskListPage, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error) + RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksRevokeAccessFuture, err error) + Update(ctx context.Context, resourceGroupName string, diskName string, disk compute.DiskUpdate) (result compute.DisksUpdateFuture, err error) +} + +var _ DisksClientAPI = (*compute.DisksClient)(nil) + +// SnapshotsClientAPI contains the set of methods on the SnapshotsClient type. 
+type SnapshotsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) (result compute.SnapshotsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.SnapshotsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.Snapshot, err error) + GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData compute.GrantAccessData) (result compute.SnapshotsGrantAccessFuture, err error) + List(ctx context.Context) (result compute.SnapshotListPage, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.SnapshotListPage, err error) + RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result compute.SnapshotsRevokeAccessFuture, err error) + Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.SnapshotUpdate) (result compute.SnapshotsUpdateFuture, err error) +} + +var _ SnapshotsClientAPI = (*compute.SnapshotsClient)(nil) + +// GalleriesClientAPI contains the set of methods on the GalleriesClient type. 
+type GalleriesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery compute.Gallery) (result compute.GalleriesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, galleryName string) (result compute.GalleriesDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, galleryName string) (result compute.Gallery, err error) + List(ctx context.Context) (result compute.GalleryListPage, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.GalleryListPage, err error) +} + +var _ GalleriesClientAPI = (*compute.GalleriesClient)(nil) + +// GalleryImagesClientAPI contains the set of methods on the GalleryImagesClient type. +type GalleryImagesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage compute.GalleryImage) (result compute.GalleryImagesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImagesDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImage, err error) + ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result compute.GalleryImageListPage, err error) +} + +var _ GalleryImagesClientAPI = (*compute.GalleryImagesClient)(nil) + +// GalleryImageVersionsClientAPI contains the set of methods on the GalleryImageVersionsClient type. 
+type GalleryImageVersionsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion compute.GalleryImageVersion) (result compute.GalleryImageVersionsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result compute.GalleryImageVersionsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand compute.ReplicationStatusTypes) (result compute.GalleryImageVersion, err error) + ListByGalleryImage(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result compute.GalleryImageVersionListPage, err error) +} + +var _ GalleryImageVersionsClientAPI = (*compute.GalleryImageVersionsClient)(nil) + +// ContainerServicesClientAPI contains the set of methods on the ContainerServicesClient type. 
+type ContainerServicesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters compute.ContainerService) (result compute.ContainerServicesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result compute.ContainerServicesDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result compute.ContainerService, err error) + List(ctx context.Context) (result compute.ContainerServiceListResultPage, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.ContainerServiceListResultPage, err error) +} + +var _ ContainerServicesClientAPI = (*compute.ContainerServicesClient)(nil) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go new file mode 100644 index 000000000..20855d4ab --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -0,0 +1,712 @@ +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/cli" + "github.com/dimchansky/utfbom" + "golang.org/x/crypto/pkcs12" +) + +// The possible keys in the Values map. +const ( + SubscriptionID = "AZURE_SUBSCRIPTION_ID" + TenantID = "AZURE_TENANT_ID" + ClientID = "AZURE_CLIENT_ID" + ClientSecret = "AZURE_CLIENT_SECRET" + CertificatePath = "AZURE_CERTIFICATE_PATH" + CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" + Username = "AZURE_USERNAME" + Password = "AZURE_PASSWORD" + EnvironmentName = "AZURE_ENVIRONMENT" + Resource = "AZURE_AD_RESOURCE" + ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" + ResourceManagerEndpoint = "ResourceManagerEndpoint" + GraphResourceID = "GraphResourceID" + SQLManagementEndpoint = "SQLManagementEndpoint" + GalleryEndpoint = "GalleryEndpoint" + ManagementEndpoint = "ManagementEndpoint" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + return settings.GetAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. 
MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + settings.Values[Resource] = resource + return settings.GetAuthorizer() +} + +// EnvironmentSettings contains the available authentication settings. +type EnvironmentSettings struct { + Values map[string]string + Environment azure.Environment +} + +// GetSettingsFromEnvironment returns the available authentication settings from the environment. +func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { + s = EnvironmentSettings{ + Values: map[string]string{}, + } + s.setValue(SubscriptionID) + s.setValue(TenantID) + s.setValue(ClientID) + s.setValue(ClientSecret) + s.setValue(CertificatePath) + s.setValue(CertificatePassword) + s.setValue(Username) + s.setValue(Password) + s.setValue(EnvironmentName) + s.setValue(Resource) + if v := s.Values[EnvironmentName]; v == "" { + s.Environment = azure.PublicCloud + } else { + s.Environment, err = azure.EnvironmentFromName(v) + } + if s.Values[Resource] == "" { + s.Values[Resource] = s.Environment.ResourceManagerEndpoint + } + return +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings EnvironmentSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified environment variable value to the Values map if it exists +func (settings EnvironmentSettings) setValue(key string) { + if v := os.Getenv(key); v != "" { + settings.Values[key] = v + } +} + +// helper to return client and tenant IDs +func (settings EnvironmentSettings) getClientAndTenant() (string, string) { + clientID := settings.Values[ClientID] + tenantID := settings.Values[TenantID] + return clientID, tenantID +} + +// GetClientCredentials creates a config object from the available client credentials. +// An error is returned if no client credentials are available. 
+func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { + secret := settings.Values[ClientSecret] + if secret == "" { + return ClientCredentialsConfig{}, errors.New("missing client secret") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCredentialsConfig(clientID, secret, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetClientCertificate creates a config object from the available certificate credentials. +// An error is returned if no certificate credentials are available. +func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { + certPath := settings.Values[CertificatePath] + if certPath == "" { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + certPwd := settings.Values[CertificatePassword] + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetUsernamePassword creates a config object from the available username/password credentials. +// An error is returned if no username/password credentials are available. 
+func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { + username := settings.Values[Username] + password := settings.Values[Password] + if username == "" || password == "" { + return UsernamePasswordConfig{}, errors.New("missing username/password") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewUsernamePasswordConfig(username, password, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetMSI creates a MSI config object from the available client ID. +func (settings EnvironmentSettings) GetMSI() MSIConfig { + config := NewMSIConfig() + config.Resource = settings.Values[Resource] + config.ClientID = settings.Values[ClientID] + return config +} + +// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs. +func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig { + clientID, tenantID := settings.getClientAndTenant() + config := NewDeviceFlowConfig(clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config +} + +// GetAuthorizer creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) { + //1.Client Credentials + if c, e := settings.GetClientCredentials(); e == nil { + return c.Authorizer() + } + + //2. Client Certificate + if c, e := settings.GetClientCertificate(); e == nil { + return c.Authorizer() + } + + //3. Username Password + if c, e := settings.GetUsernamePassword(); e == nil { + return c.Authorizer() + } + + // 4. 
MSI + return settings.GetMSI().Authorizer() +} + +// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil { + return a, err + } + if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { + s, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { + return a, err + } + if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + if settings.Values[Resource] == "" { + settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. 
+func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + +// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. +func GetSettingsFromFile() (FileSettings, error) { + s := FileSettings{} + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return s, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return s, err + } + + authFile := map[string]interface{}{} + err = json.Unmarshal(decoded, &authFile) + if err != nil { + return s, err + } + + s.Values = map[string]string{} + s.setKeyValue(ClientID, authFile["clientId"]) + s.setKeyValue(ClientSecret, authFile["clientSecret"]) + s.setKeyValue(CertificatePath, authFile["clientCertificate"]) + s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) + s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) + s.setKeyValue(TenantID, authFile["tenantId"]) + s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) + s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) + s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) + s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) + s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) + s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) + return s, nil +} + +// FileSettings contains the available authentication settings. 
+type FileSettings struct { + Values map[string]string +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings FileSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified value to the Values map if it isn't nil +func (settings FileSettings) setKeyValue(key string, val interface{}) { + if val != nil { + settings.Values[key] = val.(string) + } +} + +// returns the specified AAD endpoint or the public cloud endpoint if unspecified +func (settings FileSettings) getAADEndpoint() string { + if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { + return v + } + return azure.PublicCloud.ActiveDirectoryEndpoint +} + +// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) +} + +// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. +func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCredentialsAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken +// from the available client credentials and the specified resource. 
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { + if _, ok := settings.Values[ClientSecret]; !ok { + return nil, errors.New("missing client secret") + } + config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) +} + +func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { + if _, ok := settings.Values[CertificatePath]; !ok { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) + cfg.AADEndpoint = settings.getAADEndpoint() + cfg.Resource = resource + return cfg, nil +} + +// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. +func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) +} + +// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCertificateAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) { + cfg, err := settings.clientCertificateConfigWithResource(resource) + if err != nil { + return nil, err + } + return cfg.ServicePrincipalToken() +} + +// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource. +func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + cfg, err := settings.clientCertificateConfigWithResource(resource) + if err != nil { + return nil, err + } + return cfg.Authorizer() +} + +func decode(b []byte) ([]byte, error) { + reader, enc := utfbom.Skip(bytes.NewReader(b)) + + switch enc { + case utfbom.UTF16LittleEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.LittleEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + case utfbom.UTF16BigEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.BigEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + } + return ioutil.ReadAll(reader) +} + +func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { + // Compare default base URI from the SDK to the endpoints from the public cloud + // Base URI and token resource are the same string. This func finds the authentication + // file field that matches the SDK base URI. 
The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return settings.Values[ManagementEndpoint], nil + case azure.PublicCloud.ResourceManagerEndpoint: + return settings.Values[ResourceManagerEndpoint], nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return settings.Values[ActiveDirectoryEndpoint], nil + case azure.PublicCloud.GalleryEndpoint: + return settings.Values[GalleryEndpoint], nil + case azure.PublicCloud.GraphEndpoint: + return settings.Values[GraphResourceID], nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} + +// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { + return ClientCredentialsConfig{ + ClientID: clientID, + ClientSecret: clientSecret, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. +// Defaults to Public Cloud and Resource Manager Endpoint. 
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { + return ClientCertificateConfig{ + CertificatePath: certificatePath, + CertificatePassword: certificatePassword, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +//AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. 
+type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// Authorizer gets the authorizer from client credentials. +func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. 
+func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := dfc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalToken gets the service principal token from device flow. 
+func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + if err != nil { + return nil, err + } + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + log.Println(*deviceCode.Message) + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from username and password. +func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) +} + +// Authorizer gets the authorizer from a username and a password. 
+func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ups.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. +func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + var spToken *adal.ServicePrincipalToken + if mc.ClientID == "" { + spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + } else { + spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err) + } + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000..a336b958d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,79 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +const azureProfileJSON = "azureProfile.json" + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" { + return filepath.Join(cfgDir, azureProfileJSON), nil + } + return homedir.Expand("~/.azure/" + azureProfileJSON) +} + +// LoadProfile restores a Profile object from a file located at 'path'. 
+func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000..810075ba6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,170 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token` to an `adal.Token` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +// TODO(#199): add unit test. +func AccessTokensPath() (string, error) { + // Azure-CLI allows the user to customize the path of access tokens through an environment variable. + var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE") + var err error + + // Fallback logic to default path on non-cloud-shell environment. + // TODO(#200): remove the dependency on hard-coding path. 
+ if accessTokenPath == "" { + accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json") + } + + return accessTokenPath, err +} + +// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'. +func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. 
+ azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/dimchansky/utfbom/.gitignore b/vendor/github.com/dimchansky/utfbom/.gitignore new file mode 100644 index 000000000..d7ec5cebb --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.gitignore @@ -0,0 +1,37 @@ +# Binaries 
for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.prof + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Gogland +.idea/ \ No newline at end of file diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml new file mode 100644 index 000000000..b2e420c0d --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - '1.10' + - '1.11' + +# sudo=false makes the build run using a container +sudo: false + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/goimports + - go get github.com/golang/lint/golint +script: + - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false) + - golint ./... # This won't break the build, just show warnings + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md new file mode 100644 index 000000000..8ece28008 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. 
+ +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/github.com/dimchansky/utfbom/go.mod b/vendor/github.com/dimchansky/utfbom/go.mod new file mode 100644 index 000000000..4b9ecc6f5 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/go.mod @@ -0,0 +1 @@ +module github.com/dimchansky/utfbom \ No newline at end of file diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 000000000..77a303e56 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,192 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. 
+// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. 
+func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. +func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if 
n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000..233b8b62c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. 
+ + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 000000000..484ca51b7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. 
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted 
[]byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000..7377ce6fb --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. 
+type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 000000000..7499e3fb6 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,271 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 
0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = 
r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & 
r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000..5f38aa7de --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000..5c419d41e --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. 
+func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. 
Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. 
+ if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000..55f7691d4 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,349 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. 
+package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there 
is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func 
convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 
'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) 
+ } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 000000000..def1f7b98 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if 
!bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9446197b2..6f4714a00 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,8 +11,11 @@ contrib.go.opencensus.io/exporter/ocagent # github.com/1and1/oneandone-cloudserver-sdk-go v1.0.1 github.com/1and1/oneandone-cloudserver-sdk-go # github.com/Azure/azure-sdk-for-go v30.0.0+incompatible +github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute +github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute/computeapi github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute +github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/computeapi github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-01-01/network github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-02-01/resources @@ -23,6 +26,8 @@ github.com/Azure/azure-sdk-for-go/version github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/autorest/azure +github.com/Azure/go-autorest/autorest/azure/auth +github.com/Azure/go-autorest/autorest/azure/cli github.com/Azure/go-autorest/autorest/date github.com/Azure/go-autorest/autorest/to github.com/Azure/go-autorest/autorest/validation @@ -169,6 +174,8 @@ github.com/digitalocean/go-libvirt/internal/go-xdr/xdr2 github.com/digitalocean/go-qemu/qmp # github.com/digitalocean/godo v1.11.1 github.com/digitalocean/godo +# github.com/dimchansky/utfbom v1.1.0 +github.com/dimchansky/utfbom # github.com/docker/docker v0.0.0-20180422163414-57142e89befe github.com/docker/docker/pkg/namesgenerator # github.com/dustin/go-humanize v1.0.0 @@ -605,6 +612,8 @@ 
golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/internal/chacha20 golang.org/x/crypto/internal/subtle golang.org/x/crypto/md4 +golang.org/x/crypto/pkcs12 +golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/poly1305 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent From 716f19a4576e4f4d33a1f13db77bea0b75e7c77f Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 3 Oct 2019 04:23:37 +0000 Subject: [PATCH 39/55] make fmt --- builder/azure/chroot/builder_test.go | 2 +- builder/azure/common/client/metadata.go | 6 +++--- builder/azure/common/client/testclient.go | 13 ++++++------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/builder/azure/chroot/builder_test.go b/builder/azure/chroot/builder_test.go index 707cdf196..a0b3e2ca8 100644 --- a/builder/azure/chroot/builder_test.go +++ b/builder/azure/chroot/builder_test.go @@ -20,7 +20,7 @@ func TestBuilder_Prepare_DiskAsInput(t *testing.T) { t.Error("Expected the returned error to be of type packer.MultiError") } for _, err := range errs.Errors { - if matched, _:=regexp.MatchString(`(^|\W)source\W`,err.Error()); matched { + if matched, _ := regexp.MatchString(`(^|\W)source\W`, err.Error()); matched { t.Errorf("Did not expect an error about the 'source' field, but found %q", err) } } diff --git a/builder/azure/common/client/metadata.go b/builder/azure/common/client/metadata.go index ad76cb007..c6dcf2783 100644 --- a/builder/azure/common/client/metadata.go +++ b/builder/azure/common/client/metadata.go @@ -17,11 +17,11 @@ type MetadataClientAPI interface { GetComputeInfo() (*ComputeInfo, error) } -type ComputeInfo struct{ +type ComputeInfo struct { Name string ResourceGroupName string SubscriptionID string -Location string + Location string } // metadataClient implements MetadataClient @@ -65,7 +65,7 @@ func (client metadataClient) GetComputeInfo() (*ComputeInfo, error) { return &vminfo.ComputeInfo, nil } -func(ci ComputeInfo) ResourceID() string{ +func (ci ComputeInfo) 
ResourceID() string { return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", ci.SubscriptionID, ci.ResourceGroupName, diff --git a/builder/azure/common/client/testclient.go b/builder/azure/common/client/testclient.go index 964e04795..afb2156dc 100644 --- a/builder/azure/common/client/testclient.go +++ b/builder/azure/common/client/testclient.go @@ -1,31 +1,30 @@ package client import ( + "errors" + "net/http" "os" "testing" - "net/http" - "errors" "github.com/Azure/go-autorest/autorest/azure/auth" ) func GetTestClientSet(t *testing.T) (AzureClientSet, error) { - if (os.Getenv("AZURE_INTEGRATION_TEST") == "") { - t.Skip("AZURE_INTEGRATION_TEST not set") + if os.Getenv("AZURE_INTEGRATION_TEST") == "" { + t.Skip("AZURE_INTEGRATION_TEST not set") } else { a, err := auth.NewAuthorizerFromEnvironment() if err == nil { cli := azureClientSet{} - cli.authorizer = a + cli.authorizer = a cli.subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID") cli.PollingDelay = 0 cli.sender = http.DefaultClient return cli, nil - } else { + } else { t.Skipf("Could not create Azure client: %v", err) } } return nil, errors.New("Couldn't create client set") } - From 59438810512dbbbbe6230c467b98ecc859212141 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 3 Oct 2019 04:45:39 +0000 Subject: [PATCH 40/55] Default for Gen 1 VMs --- builder/azure/chroot/builder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index e72507441..711146703 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -89,7 +89,7 @@ type Config struct { // The image to create using this build. ImageResourceID string `mapstructure:"image_resource_id" required:"true"` // The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes). - // Defaults to `V2`. + // Defaults to `V1`. 
ImageHyperVGeneration string `mapstructure:"image_hyperv_generation"` ctx interpolate.Context @@ -190,7 +190,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } if b.config.ImageHyperVGeneration == "" { - b.config.ImageHyperVGeneration = string(compute.V2) + b.config.ImageHyperVGeneration = string(compute.V1) } // checks, accumulate any errors or warnings From 97e48153bc41552044d9c456b077d87c8f2ec1bb Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 3 Oct 2019 21:01:17 +0000 Subject: [PATCH 41/55] fixup! Default for Gen 1 VMs --- .../partials/builder/azure/chroot/_Config-not-required.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/partials/builder/azure/chroot/_Config-not-required.html.md b/website/source/partials/builder/azure/chroot/_Config-not-required.html.md index 8f8556919..91029b5f1 100644 --- a/website/source/partials/builder/azure/chroot/_Config-not-required.html.md +++ b/website/source/partials/builder/azure/chroot/_Config-not-required.html.md @@ -46,5 +46,5 @@ - `os_disk_skip_cleanup` (bool) - If set to `true`, leaves the temporary disk behind in the Packer VM resource group. Defaults to `false` - `image_hyperv_generation` (string) - The [Hyper-V generation type](https://docs.microsoft.com/en-us/rest/api/compute/images/createorupdate#hypervgenerationtypes). - Defaults to `V2`. + Defaults to `V1`. 
\ No newline at end of file From eb5dc9326d5ed6e5d5ac3ec4cad794722b37e7d7 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 16:59:41 +0000 Subject: [PATCH 42/55] Adding more tests --- builder/azure/chroot/diskattacher.go | 2 +- builder/azure/chroot/step_attach_disk_test.go | 131 ++++++++++++++++++ .../azure/chroot/step_verify_source_disk.go | 30 +++- .../chroot/step_verify_source_disk_test.go | 24 +++- 4 files changed, 178 insertions(+), 9 deletions(-) create mode 100644 builder/azure/chroot/step_attach_disk_test.go diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go index c52e294eb..abeba5af6 100644 --- a/builder/azure/chroot/diskattacher.go +++ b/builder/azure/chroot/diskattacher.go @@ -26,7 +26,7 @@ type DiskAttacher interface { WaitForDetach(ctx context.Context, diskID string) error } -func NewDiskAttacher(azureClient client.AzureClientSet) DiskAttacher { +var NewDiskAttacher = func (azureClient client.AzureClientSet) DiskAttacher { return &diskAttacher{ azcli: azureClient, } diff --git a/builder/azure/chroot/step_attach_disk_test.go b/builder/azure/chroot/step_attach_disk_test.go new file mode 100644 index 000000000..e644b6a78 --- /dev/null +++ b/builder/azure/chroot/step_attach_disk_test.go @@ -0,0 +1,131 @@ +package chroot + +import ( + "context" + "errors" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +func TestStepAttachDisk_Run(t *testing.T) { + type fields struct { + GetDiskResponseCode int + GetDiskResponseBody string + + attachError error + waitForDeviceError error + } + tests := []struct { + name string + fields fields + want multistep.StepAction + }{ + { + name: "HappyPath", + want: multistep.ActionContinue, + }, + { 
+ name: "AttachError", + fields: fields{ + attachError: errors.New("unit test"), + }, + want: multistep.ActionHalt, + }, + { + name: "WaitForDeviceError", + fields: fields{ + waitForDeviceError: errors.New("unit test"), + }, + want: multistep.ActionHalt, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &StepAttachDisk{} + + NewDiskAttacher = func(azcli client.AzureClientSet) DiskAttacher { + return &fakeDiskAttacher{ + attachError: tt.fields.attachError, + waitForDeviceError: tt.fields.waitForDeviceError, + } + } + + dm := compute.NewDisksClient("subscriptionId") + dm.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return &http.Response{ + Request: r, + Body: ioutil.NopCloser(strings.NewReader(tt.fields.GetDiskResponseBody)), + StatusCode: tt.fields.GetDiskResponseCode, + }, nil + }) + + errorBuffer := &strings.Builder{} + ui := &packer.BasicUi{ + Reader: strings.NewReader(""), + Writer: ioutil.Discard, + ErrorWriter: errorBuffer, + } + + state := new(multistep.BasicStateBag) + state.Put("azureclient", &client.AzureClientSetMock{}) + state.Put("ui", ui) + state.Put("os_disk_resource_id", "/subscriptions/12345/resourceGroups/group1/providers/Microsoft.Compute/disks/disk1") + + got := s.Run(context.TODO(), state) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("StepAttachDisk.Run() = %v, want %v", got, tt.want) + } + + if got == multistep.ActionHalt { + if _, ok := state.GetOk("error"); !ok { + t.Fatal("Expected 'error' to be set in statebag after failure") + } + } + }) + } +} + +type fakeDiskAttacher struct { + attachError error + waitForDeviceError error +} + +var _ DiskAttacher = &fakeDiskAttacher{} + +func (da *fakeDiskAttacher) AttachDisk(ctx context.Context, disk string) (lun int32, err error) { + if da.attachError != nil { + return 0, da.attachError + } + return 3, nil +} + +func (da *fakeDiskAttacher) DiskPathForLun(lun int32) string { + panic("not implemented") +} + +func (da 
*fakeDiskAttacher) WaitForDevice(ctx context.Context, lun int32) (device string, err error) { + if da.waitForDeviceError != nil { + return "", da.waitForDeviceError + } + if lun == 3 { + return "/dev/sdq", nil + } + panic("expected lun==3") +} + +func (da *fakeDiskAttacher) DetachDisk(ctx context.Context, disk string) (err error) { + panic("not implemented") +} + +func (da *fakeDiskAttacher) WaitForDetach(ctx context.Context, diskID string) error { + panic("not implemented") +} diff --git a/builder/azure/chroot/step_verify_source_disk.go b/builder/azure/chroot/step_verify_source_disk.go index b949447cb..04879abd9 100644 --- a/builder/azure/chroot/step_verify_source_disk.go +++ b/builder/azure/chroot/step_verify_source_disk.go @@ -3,6 +3,7 @@ package chroot import ( "context" "fmt" + "log" "strings" "github.com/Azure/go-autorest/autorest/azure" @@ -26,36 +27,51 @@ func (s StepVerifySourceDisk) Run(ctx context.Context, state multistep.StateBag) ui.Say("Checking source disk location") resource, err := azure.ParseResourceID(s.SourceDiskResourceID) if err != nil { - ui.Error(fmt.Sprintf("Could not parse resource id %q: %s", s.SourceDiskResourceID, err)) + log.Printf("StepVerifySourceDisk.Run: error: %+v", err) + err := fmt.Errorf("Could not parse resource id %q: %s", s.SourceDiskResourceID, err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } if !strings.EqualFold(resource.SubscriptionID, s.SubscriptionID) { - ui.Error(fmt.Sprintf("Source disk resource %q is in a different subscription than this VM (%q). "+ + err := fmt.Errorf("Source disk resource %q is in a different subscription than this VM (%q). 
"+ "Packer does not know how to handle that.", - s.SourceDiskResourceID, s.SubscriptionID)) + s.SourceDiskResourceID, s.SubscriptionID) + log.Printf("StepVerifySourceDisk.Run: error: %+v", err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } if !(strings.EqualFold(resource.Provider, "Microsoft.Compute") && strings.EqualFold(resource.ResourceType, "disks")) { - ui.Error(fmt.Sprintf("Resource ID %q is not a managed disk resource", s.SourceDiskResourceID)) + err := fmt.Errorf("Resource ID %q is not a managed disk resource", s.SourceDiskResourceID) + log.Printf("StepVerifySourceDisk.Run: error: %+v", err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } disk, err := azcli.DisksClient().Get(ctx, resource.ResourceGroup, resource.ResourceName) if err != nil { - ui.Error(fmt.Sprintf("Unable to retrieve disk (%q): %s", s.SourceDiskResourceID, err)) + err := fmt.Errorf("Unable to retrieve disk (%q): %s", s.SourceDiskResourceID, err) + log.Printf("StepVerifySourceDisk.Run: error: %+v", err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } location := to.String(disk.Location) if !strings.EqualFold(location, s.Location) { - ui.Error(fmt.Sprintf("Source disk resource %q is in a different location (%q) than this VM (%q). "+ + err := fmt.Errorf("Source disk resource %q is in a different location (%q) than this VM (%q). 
"+ "Packer does not know how to handle that.", s.SourceDiskResourceID, location, - s.Location)) + s.Location) + log.Printf("StepVerifySourceDisk.Run: error: %+v", err) + state.Put("error", err) + ui.Error(err.Error()) return multistep.ActionHalt } diff --git a/builder/azure/chroot/step_verify_source_disk_test.go b/builder/azure/chroot/step_verify_source_disk_test.go index 113fd3f73..3f60e5c57 100644 --- a/builder/azure/chroot/step_verify_source_disk_test.go +++ b/builder/azure/chroot/step_verify_source_disk_test.go @@ -47,6 +47,19 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { }, want: multistep.ActionContinue, }, + { + name: "NotAResourceID", + fields: fields{ + SubscriptionID: "subid1", + SourceDiskResourceID: "/other", + Location: "westus2", + + GetDiskResponseCode: 200, + GetDiskResponseBody: `{"location":"westus2"}`, + }, + want: multistep.ActionHalt, + errormatch: "Could not parse resource id", + }, { name: "DiskNotFound", fields: fields{ @@ -115,6 +128,7 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { StatusCode: tt.fields.GetDiskResponseCode, }, nil }) + errorBuffer := &strings.Builder{} ui := &packer.BasicUi{ Reader: strings.NewReader(""), @@ -128,14 +142,22 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { }) state.Put("ui", ui) - if got := s.Run(context.TODO(), state); !reflect.DeepEqual(got, tt.want) { + got := s.Run(context.TODO(), state); + if !reflect.DeepEqual(got, tt.want) { t.Errorf("StepVerifySourceDisk.Run() = %v, want %v", got, tt.want) } + if tt.errormatch != "" { if !regexp.MustCompile(tt.errormatch).MatchString(errorBuffer.String()) { t.Errorf("Expected the error output (%q) to match %q", errorBuffer.String(), tt.errormatch) } } + + if got == multistep.ActionHalt { + if _, ok := state.GetOk("error"); !ok { + t.Fatal("Expected 'error' to be set in statebag after failure") + } + } }) } } From 98175c06d5301da87dba2c15d5c2fdf1e1b6e35c Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 17:14:10 +0000 Subject: [PATCH 
43/55] Add test that verifies disksize bug (1/2) --- builder/azure/chroot/diskattacher_test.go | 2 +- builder/azure/chroot/step_create_new_disk.go | 1 - .../azure/chroot/step_create_new_disk_test.go | 166 +++++++++++++----- 3 files changed, 123 insertions(+), 46 deletions(-) diff --git a/builder/azure/chroot/diskattacher_test.go b/builder/azure/chroot/diskattacher_test.go index 088dbd147..3b8c55e4d 100644 --- a/builder/azure/chroot/diskattacher_test.go +++ b/builder/azure/chroot/diskattacher_test.go @@ -19,7 +19,7 @@ const ( // Tests assume current machine is capable of running chroot builder (i.e. an Azure VM) func Test_DiskAttacherAttachesDiskToVM(t *testing.T) { - azcli, err := client.GetTestClientSet(t) + azcli, err := client.GetTestClientSet(t) // integration test require.Nil(t, err) da := NewDiskAttacher(azcli) testDiskName := t.Name() diff --git a/builder/azure/chroot/step_create_new_disk.go b/builder/azure/chroot/step_create_new_disk.go index ed5844c4e..cf033ccf0 100644 --- a/builder/azure/chroot/step_create_new_disk.go +++ b/builder/azure/chroot/step_create_new_disk.go @@ -49,7 +49,6 @@ func (s StepCreateNewDisk) Run(ctx context.Context, state multistep.StateBag) mu OsType: "Linux", HyperVGeneration: compute.HyperVGeneration(s.HyperVGeneration), CreationData: &compute.CreationData{}, - DiskSizeGB: to.Int32Ptr(s.DiskSizeGB), }, //Tags: map[string]*string{ } diff --git a/builder/azure/chroot/step_create_new_disk_test.go b/builder/azure/chroot/step_create_new_disk_test.go index dc95a8b8e..4e05adcd5 100644 --- a/builder/azure/chroot/step_create_new_disk_test.go +++ b/builder/azure/chroot/step_create_new_disk_test.go @@ -4,6 +4,7 @@ import ( "context" "io/ioutil" "net/http" + "reflect" "regexp" "testing" @@ -14,56 +15,133 @@ import ( "github.com/hashicorp/packer/packer" ) -func Test_StepCreateNewDisk_FromDisk(t *testing.T) { - sut := StepCreateNewDisk{ - SubscriptionID: "SubscriptionID", - ResourceGroup: "ResourceGroupName", - DiskName: "TemporaryOSDiskName", - 
DiskSizeGB: 42, - DiskStorageAccountType: string(compute.PremiumLRS), - HyperVGeneration: string(compute.V1), - Location: "westus", - SourceDiskResourceID: "SourceDisk", - } +func TestStepCreateNewDisk_Run(t *testing.T) { + type fields struct { + SubscriptionID string + ResourceGroup string + DiskName string + DiskSizeGB int32 + DiskStorageAccountType string + HyperVGeneration string + Location string + PlatformImage *client.PlatformImage + SourceDiskResourceID string - expected := regexp.MustCompile(`[\s\n]`).ReplaceAllString(` -{ - "location": "westus", - "properties": { - "osType": "Linux", - "hyperVGeneration": "V1", - "creationData": { - "createOption": "Copy", - "sourceResourceId": "SourceDisk" + expectedPutDiskBody string + } + tests := []struct { + name string + fields fields + want multistep.StepAction + }{ + { + name: "HappyPathDiskSource", + fields: fields{ + SubscriptionID: "SubscriptionID", + ResourceGroup: "ResourceGroupName", + DiskName: "TemporaryOSDiskName", + DiskSizeGB: 42, + DiskStorageAccountType: string(compute.PremiumLRS), + HyperVGeneration: string(compute.V1), + Location: "westus", + SourceDiskResourceID: "SourceDisk", + + expectedPutDiskBody: ` + { + "location": "westus", + "properties": { + "osType": "Linux", + "hyperVGeneration": "V1", + "creationData": { + "createOption": "Copy", + "sourceResourceId": "SourceDisk" + }, + "diskSizeGB": 42 + }, + "sku": { + "name": "Premium_LRS" + } + }`, + }, + want: multistep.ActionContinue, + }, + { + name: "HappyPathDiskSource", + fields: fields{ + SubscriptionID: "SubscriptionID", + ResourceGroup: "ResourceGroupName", + DiskName: "TemporaryOSDiskName", + DiskStorageAccountType: string(compute.StandardLRS), + HyperVGeneration: string(compute.V1), + Location: "westus", + PlatformImage: &client.PlatformImage{ + Publisher: "Microsoft", + Offer: "Windows", + Sku: "2016-DataCenter", + Version: "2016.1.4", + }, + + expectedPutDiskBody: ` + { + "location": "westus", + "properties": { + "osType": "Linux", + 
"hyperVGeneration": "V1", + "creationData": { + "createOption":"FromImage", + "imageReference": { + "id":"/subscriptions/SubscriptionID/providers/Microsoft.Compute/locations/westus/publishers/Microsoft/artifacttypes/vmimage/offers/Windows/skus/2016-DataCenter/versions/2016.1.4" + } + } + }, + "sku": { + "name": "Standard_LRS" + } + }`, + }, + want: multistep.ActionContinue, }, - "diskSizeGB": 42 - }, - "sku": { - "name": "Premium_LRS" } -}`, "") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := StepCreateNewDisk{ + SubscriptionID: tt.fields.SubscriptionID, + ResourceGroup: tt.fields.ResourceGroup, + DiskName: tt.fields.DiskName, + DiskSizeGB: tt.fields.DiskSizeGB, + DiskStorageAccountType: tt.fields.DiskStorageAccountType, + HyperVGeneration: tt.fields.HyperVGeneration, + Location: tt.fields.Location, + PlatformImage: tt.fields.PlatformImage, + SourceDiskResourceID: tt.fields.SourceDiskResourceID, + } - m := compute.NewDisksClient("subscriptionId") - m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - b, _ := ioutil.ReadAll(r.Body) - if string(b) != expected { - t.Fatalf("expected body to be %q, but got %q", expected, string(b)) - } - return &http.Response{ - Request: r, - StatusCode: 200, - }, nil - }) + expectedPutDiskBody := regexp.MustCompile(`[\s\n]`).ReplaceAllString(tt.fields.expectedPutDiskBody, "") - state := new(multistep.BasicStateBag) - state.Put("azureclient", &client.AzureClientSetMock{ - DisksClientMock: m, - }) - state.Put("ui", packer.TestUi(t)) + m := compute.NewDisksClient("subscriptionId") + m.Sender = autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "PUT" { + t.Fatal("Expected only a PUT disk call") + } + b, _ := ioutil.ReadAll(r.Body) + if string(b) != expectedPutDiskBody { + t.Fatalf("expected body to be %q, but got %q", expectedPutDiskBody, string(b)) + } + return &http.Response{ + Request: r, + StatusCode: 200, + }, nil + }) - r := 
sut.Run(context.TODO(), state) + state := new(multistep.BasicStateBag) + state.Put("azureclient", &client.AzureClientSetMock{ + DisksClientMock: m, + }) + state.Put("ui", packer.TestUi(t)) - if r != multistep.ActionContinue { - t.Fatal("Run failed") + if got := s.Run(context.TODO(), state); !reflect.DeepEqual(got, tt.want) { + t.Errorf("StepCreateNewDisk.Run() = %v, want %v", got, tt.want) + } + }) } } From b5c0742951a8b3dc5912860188517011d5f43873 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 17:30:34 +0000 Subject: [PATCH 44/55] Add test that verifies disksize bug (2/2) --- builder/azure/chroot/builder_test.go | 45 ++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/builder/azure/chroot/builder_test.go b/builder/azure/chroot/builder_test.go index a0b3e2ca8..40156bf1f 100644 --- a/builder/azure/chroot/builder_test.go +++ b/builder/azure/chroot/builder_test.go @@ -1,6 +1,7 @@ package chroot import ( + "reflect" "regexp" "testing" @@ -26,3 +27,47 @@ func TestBuilder_Prepare_DiskAsInput(t *testing.T) { } } } + +func TestBuilder_Prepare(t *testing.T) { + type config map[string]interface{} + + tests := []struct { + name string + config config + want []string + validate func(Config) + wantErr bool + }{ + { + name: "HappyPath", + config: config{ + "client_id": "123", + "client_secret": "456", + "subscription_id": "789", + "resource_group": "rgname", + "image_resource_id": "/subscriptions/789/resourceGroups/otherrgname/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", + "source": "credativ:Debian:9:latest", + }, + wantErr: false, + validate: func(c Config){ + if(c.OSDiskSizeGB!=0){ + t.Fatalf("Expected OSDiskSizeGB to be 0, was %+v", c.OSDiskSizeGB) + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &Builder{} + + got, err := b.Prepare(tt.config) + if (err != nil) != tt.wantErr { + t.Errorf("Builder.Prepare() error = %v, wantErr %v", err, tt.wantErr) + return + } + if 
!reflect.DeepEqual(got, tt.want) { + t.Errorf("Builder.Prepare() = %v, want %v", got, tt.want) + } + }) + } +} From f72c4ec7a37fbb7ec24829b0eb5d02e02008e4c2 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 18:01:02 +0000 Subject: [PATCH 45/55] Update example and docs --- builder/azure/chroot/builder_test.go | 65 +++++++++---------- examples/azure/debian-chroot.json | 5 +- .../docs/builders/azure-chroot.html.md.erb | 9 +-- 3 files changed, 39 insertions(+), 40 deletions(-) diff --git a/builder/azure/chroot/builder_test.go b/builder/azure/chroot/builder_test.go index 40156bf1f..dd40f7737 100644 --- a/builder/azure/chroot/builder_test.go +++ b/builder/azure/chroot/builder_test.go @@ -1,58 +1,53 @@ package chroot import ( - "reflect" - "regexp" "testing" - "github.com/hashicorp/packer/packer" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" ) -func TestBuilder_Prepare_DiskAsInput(t *testing.T) { - b := Builder{} - _, err := b.Prepare(map[string]interface{}{ - "source": "/subscriptions/28279221-ccbe-40f0-b70b-4d78ab822e09/resourceGroups/testrg/providers/Microsoft.Compute/disks/diskname", - }) - - if err != nil { - // make sure there is no error about the source field - errs, ok := err.(*packer.MultiError) - if !ok { - t.Error("Expected the returned error to be of type packer.MultiError") - } - for _, err := range errs.Errors { - if matched, _ := regexp.MatchString(`(^|\W)source\W`, err.Error()); matched { - t.Errorf("Did not expect an error about the 'source' field, but found %q", err) - } - } - } -} - func TestBuilder_Prepare(t *testing.T) { type config map[string]interface{} - + type regexMatchers map[string]string // map of regex : error message + tests := []struct { name string config config - want []string validate func(Config) wantErr bool }{ { - name: "HappyPath", + name: "HappyPathFromPlatformImage", config: config{ "client_id": "123", "client_secret": "456", "subscription_id": "789", - "resource_group": "rgname", 
"image_resource_id": "/subscriptions/789/resourceGroups/otherrgname/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", "source": "credativ:Debian:9:latest", }, - wantErr: false, - validate: func(c Config){ - if(c.OSDiskSizeGB!=0){ - t.Fatalf("Expected OSDiskSizeGB to be 0, was %+v", c.OSDiskSizeGB) + validate: func(c Config) { + if c.OSDiskSizeGB != 0 { + t.Errorf("Expected OSDiskSizeGB to be 0, was %+v", c.OSDiskSizeGB) } + if c.MountPartition != "1" { + t.Errorf("Expected MountPartition to be %s, but found %s", "1", c.MountPartition) + } + if c.OSDiskStorageAccountType != string(compute.PremiumLRS) { + t.Errorf("Expected OSDiskStorageAccountType to be %s, but found %s", string(compute.PremiumLRS), c.OSDiskStorageAccountType) + } + if c.OSDiskCacheType != string(compute.CachingTypesReadOnly) { + t.Errorf("Expected OSDiskCacheType to be %s, but found %s", string(compute.CachingTypesReadOnly), c.OSDiskCacheType) + } + if c.ImageHyperVGeneration != string(compute.V1) { + t.Errorf("Expected ImageHyperVGeneration to be %s, but found %s", string(compute.V1), c.ImageHyperVGeneration) + } + }, + }, + { + name: "HappyPathFromPlatformImage", + config: config{ + "image_resource_id": "/subscriptions/789/resourceGroups/otherrgname/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", + "source": "/subscriptions/789/resourceGroups/testrg/providers/Microsoft.Compute/disks/diskname", }, }, } @@ -60,13 +55,15 @@ func TestBuilder_Prepare(t *testing.T) { t.Run(tt.name, func(t *testing.T) { b := &Builder{} - got, err := b.Prepare(tt.config) + _, err := b.Prepare(tt.config) + if (err != nil) != tt.wantErr { t.Errorf("Builder.Prepare() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Builder.Prepare() = %v, want %v", got, tt.want) + + if tt.validate != nil { + tt.validate(b.config) } }) } diff --git a/examples/azure/debian-chroot.json b/examples/azure/debian-chroot.json index 4b29632b1..00909a55f 100644 
--- a/examples/azure/debian-chroot.json +++ b/examples/azure/debian-chroot.json @@ -2,7 +2,8 @@ "variables": { "client_id": "{{env `ARM_CLIENT_ID`}}", "client_secret": "{{env `ARM_CLIENT_SECRET`}}", - "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", + "resource_group": "{{env `ARM_IMAGE_RESOURCEGROUP_ID`}}" }, "builders": [{ "type": "azure-chroot", @@ -23,4 +24,4 @@ "inline_shebang": "/bin/sh -x", "type": "shell" }] -} +} \ No newline at end of file diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index 71b488deb..7244671a7 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -104,18 +104,19 @@ mounts `/prod` and `/dev`: ## Example Here is an example that creates a Debian image with updated packages. Specify all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, -`ARM_SUBSCRIPTION_ID`) to use a service principal, specify only `ARM_SUBSCRIPTION_ID` -to use interactive login or leave them empty to use the system-assigned identity -of the VM you run Packer on. +`ARM_SUBSCRIPTION_ID`) to use a service principal. The identity you choose should have permission to create disks and images and also to update your VM. +Set the `ARM_IMAGE_RESOURCEGROUP_ID` variable to an existing resource group in the +subscription where the resulting image will be created. 
``` json { "variables": { "client_id": "{{env `ARM_CLIENT_ID`}}", "client_secret": "{{env `ARM_CLIENT_SECRET`}}", - "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", + "resource_group": "{{env `ARM_IMAGE_RESOURCEGROUP_ID`}}" }, "builders": [{ "type": "azure-chroot", From 19a3502b3655e9c2400df720232f9ab4f3fea9dd Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 18:59:11 +0000 Subject: [PATCH 46/55] Add 'vm' template function --- builder/azure/chroot/builder.go | 4 ++ builder/azure/chroot/template_funcs.go | 37 +++++++++++++++ .../docs/builders/azure-chroot.html.md.erb | 47 ++++++++++++++++++- 3 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 builder/azure/chroot/template_funcs.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 711146703..a5e26f9b3 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -115,6 +115,7 @@ type Builder struct { func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ctx.Funcs = azcommon.TemplateFuncs + b.config.ctx.Funcs["vm"] = CreateVMMetadataTemplateFunc() err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, @@ -129,6 +130,9 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { }, }, }, raws...) + if err != nil { + return nil, err + } var errs *packer.MultiError var warns []string diff --git a/builder/azure/chroot/template_funcs.go b/builder/azure/chroot/template_funcs.go new file mode 100644 index 000000000..7594cf949 --- /dev/null +++ b/builder/azure/chroot/template_funcs.go @@ -0,0 +1,37 @@ +package chroot + +import ( + "fmt" + "sync" + + "github.com/hashicorp/packer/builder/azure/common/client" +) + +// CreateVMMetadataTemplateFunc returns a template function that retrieves VM metadata. VM metadata is retrieved only once and reused for all executions of the function. 
+func CreateVMMetadataTemplateFunc() func(string) (string, error) { + var data *client.ComputeInfo + var dataErr error + once := sync.Once{} + return func(key string) (string, error) { + once.Do(func() { + data, dataErr = client.DefaultMetadataClient.GetComputeInfo() + }) + if dataErr != nil { + return "", dataErr + } + switch key { + case "name": + return data.Name, nil + case "subscription_id": + return data.SubscriptionID, nil + case "resource_group": + return data.ResourceGroupName, nil + case "location": + return data.Location, nil + case "resource_id": + return data.ResourceID(), nil + default: + return "", fmt.Errorf("unknown metadata key: %s (supported: name, subscription_id, resource_group, location, resource_id)", key) + } + } +} diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index 7244671a7..4a11152bd 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -101,7 +101,52 @@ mounts `/prod` and `/dev`: - The mount directory. -## Example +## Additional template function + +Because this builder runs on an Azure VM, there is an additional template function +available called `vm`, which returns the following VM metadata: + +- name +- subscription_id +- resource_group +- location +- resource_id + +This function can be used in the configuration templates, for example, use +``` +"{{ vm `subscription_id` }}" +``` +to fill in the subscription ID of the VM in any of the configuration options. + +## Examples +Here are some examples using this builder. 
+ +### Using a VM with a Managed Identity +On a VM with a system-assigned managed identity that has the contributor role +on its own resource group, the following config can be used to create an +updated Debian image: +``` json +{ + "builders": [{ + "type": "azure-chroot", + + "command_wrapper": "sudo {{.Command}}", + + "image_resource_id": "/subscriptions/{{vm `subscription_id`}}/resourceGroups/{{vm `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", + "source": "credativ:Debian:9:latest" + }], + "provisioners": [{ + "inline": [ + "apt-get update", + "apt-get upgrade -y" + ], + "inline_shebang": "/bin/sh -x", + "type": "shell" + }] +} +``` + +### Using a Service Principal Here is an example that creates a Debian image with updated packages. Specify all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, `ARM_SUBSCRIPTION_ID`) to use a service principal. From fe584f4448f884845753de503e607aa6c08f9726 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 4 Oct 2019 19:00:22 +0000 Subject: [PATCH 47/55] Format updates --- builder/azure/chroot/diskattacher.go | 2 +- builder/azure/chroot/step_verify_source_disk_test.go | 6 +++--- website/source/docs/builders/azure-chroot.html.md.erb | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go index abeba5af6..0aa833b6d 100644 --- a/builder/azure/chroot/diskattacher.go +++ b/builder/azure/chroot/diskattacher.go @@ -26,7 +26,7 @@ type DiskAttacher interface { WaitForDetach(ctx context.Context, diskID string) error } -var NewDiskAttacher = func (azureClient client.AzureClientSet) DiskAttacher { +var NewDiskAttacher = func(azureClient client.AzureClientSet) DiskAttacher { return &diskAttacher{ azcli: azureClient, } diff --git a/builder/azure/chroot/step_verify_source_disk_test.go b/builder/azure/chroot/step_verify_source_disk_test.go index 3f60e5c57..8d2191ac0 100644 --- 
a/builder/azure/chroot/step_verify_source_disk_test.go +++ b/builder/azure/chroot/step_verify_source_disk_test.go @@ -57,7 +57,7 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { GetDiskResponseCode: 200, GetDiskResponseBody: `{"location":"westus2"}`, }, - want: multistep.ActionHalt, + want: multistep.ActionHalt, errormatch: "Could not parse resource id", }, { @@ -142,7 +142,7 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { }) state.Put("ui", ui) - got := s.Run(context.TODO(), state); + got := s.Run(context.TODO(), state) if !reflect.DeepEqual(got, tt.want) { t.Errorf("StepVerifySourceDisk.Run() = %v, want %v", got, tt.want) } @@ -152,7 +152,7 @@ func Test_StepVerifySourceDisk_Run(t *testing.T) { t.Errorf("Expected the error output (%q) to match %q", errorBuffer.String(), tt.errormatch) } } - + if got == multistep.ActionHalt { if _, ok := state.GetOk("error"); !ok { t.Fatal("Expected 'error' to be set in statebag after failure") diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index 4a11152bd..a6a52bc46 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -125,6 +125,7 @@ Here are some examples using this builder. 
On a VM with a system-assigned managed identity that has the contributor role on its own resource group, the following config can be used to create an updated Debian image: + ``` json { "builders": [{ From 0866cb3714df37df79cbf753ce354fcb9a65be50 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 7 Oct 2019 18:56:20 +0000 Subject: [PATCH 48/55] Reorganize client/common packages --- builder/azure/common/client/config.go | 3 +-- builder/azure/common/{ => client}/detect_azure.go | 2 +- builder/azure/common/{ => client}/detect_azure_linux.go | 2 +- builder/azure/common/{ => client}/detect_azure_linux_test.go | 2 +- builder/azure/common/{ => client}/devicelogin.go | 2 +- builder/azure/common/client/metadata_test.go | 3 +-- builder/azure/common/client/tokenprovider_devicewflow.go | 3 +-- 7 files changed, 7 insertions(+), 10 deletions(-) rename builder/azure/common/{ => client}/detect_azure.go (90%) rename builder/azure/common/{ => client}/detect_azure_linux.go (96%) rename builder/azure/common/{ => client}/detect_azure_linux_test.go (97%) rename builder/azure/common/{ => client}/devicelogin.go (99%) diff --git a/builder/azure/common/client/config.go b/builder/azure/common/client/config.go index 230188eb5..7591b4b33 100644 --- a/builder/azure/common/client/config.go +++ b/builder/azure/common/client/config.go @@ -4,7 +4,6 @@ package client import ( "fmt" - "github.com/hashicorp/packer/builder/azure/common" "os" "strings" "time" @@ -303,4 +302,4 @@ func (c *Config) FillParameters() error { } // allow override for unit tests -var findTenantID = common.FindTenantID +var findTenantID = FindTenantID diff --git a/builder/azure/common/detect_azure.go b/builder/azure/common/client/detect_azure.go similarity index 90% rename from builder/azure/common/detect_azure.go rename to builder/azure/common/client/detect_azure.go index 001ca1b4f..fef7a2932 100644 --- a/builder/azure/common/detect_azure.go +++ b/builder/azure/common/client/detect_azure.go @@ -1,6 +1,6 @@ // +build !linux 
-package common +package client // IsAzure returns true if Packer is running on Azure (currently only works on Linux) func IsAzure() bool { diff --git a/builder/azure/common/detect_azure_linux.go b/builder/azure/common/client/detect_azure_linux.go similarity index 96% rename from builder/azure/common/detect_azure_linux.go rename to builder/azure/common/client/detect_azure_linux.go index f57eb42b9..c2b9a2800 100644 --- a/builder/azure/common/detect_azure_linux.go +++ b/builder/azure/common/client/detect_azure_linux.go @@ -1,4 +1,4 @@ -package common +package client import ( "bytes" diff --git a/builder/azure/common/detect_azure_linux_test.go b/builder/azure/common/client/detect_azure_linux_test.go similarity index 97% rename from builder/azure/common/detect_azure_linux_test.go rename to builder/azure/common/client/detect_azure_linux_test.go index 9d755cfd9..5975d7a6b 100644 --- a/builder/azure/common/detect_azure_linux_test.go +++ b/builder/azure/common/client/detect_azure_linux_test.go @@ -1,4 +1,4 @@ -package common +package client import ( "io/ioutil" diff --git a/builder/azure/common/devicelogin.go b/builder/azure/common/client/devicelogin.go similarity index 99% rename from builder/azure/common/devicelogin.go rename to builder/azure/common/client/devicelogin.go index ea177638a..888e851bf 100644 --- a/builder/azure/common/devicelogin.go +++ b/builder/azure/common/client/devicelogin.go @@ -1,4 +1,4 @@ -package common +package client import ( "context" diff --git a/builder/azure/common/client/metadata_test.go b/builder/azure/common/client/metadata_test.go index dddc9a2a0..75353d141 100644 --- a/builder/azure/common/client/metadata_test.go +++ b/builder/azure/common/client/metadata_test.go @@ -6,12 +6,11 @@ import ( "github.com/Azure/go-autorest/autorest/azure" - "github.com/hashicorp/packer/builder/azure/common" "github.com/stretchr/testify/assert" ) func Test_MetadataReturnsComputeInfo(t *testing.T) { - if !common.IsAzure() { + if !IsAzure() { t.Skipf("Not 
running on Azure, skipping live IMDS test") } mdc := NewMetadataClient() diff --git a/builder/azure/common/client/tokenprovider_devicewflow.go b/builder/azure/common/client/tokenprovider_devicewflow.go index 3c2b9fd17..00d83488a 100644 --- a/builder/azure/common/client/tokenprovider_devicewflow.go +++ b/builder/azure/common/client/tokenprovider_devicewflow.go @@ -6,7 +6,6 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - packerAzureCommon "github.com/hashicorp/packer/builder/azure/common" ) func NewDeviceFlowOAuthTokenProvider(env azure.Environment, say func(string), tenantID string) oAuthTokenProvider { @@ -36,5 +35,5 @@ func (tp *deviceflowOauthTokenProvider) getServicePrincipalTokenWithResource(res tp.say(fmt.Sprintf("Getting token for %s", resource)) } - return packerAzureCommon.Authenticate(tp.env, tp.tenantID, tp.say, resource) + return Authenticate(tp.env, tp.tenantID, tp.say, resource) } From 9b724d4ca05e7f151929375ef1a9d47777ef47f4 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 7 Oct 2019 19:20:08 +0000 Subject: [PATCH 49/55] Add artifact --- builder/azure/chroot/builder.go | 11 +++- builder/azure/common/artifact.go | 103 +++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 builder/azure/common/artifact.go diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index a5e26f9b3..27f763706 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -28,6 +28,9 @@ import ( "github.com/Azure/go-autorest/autorest/to" ) +// BuilderId is the unique ID for this builder +const BuilderId = "azure.chroot" + // Config is the configuration that is chained through the steps and settable // from the template. 
type Config struct { @@ -437,7 +440,13 @@ func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (pack return nil, rawErr.(error) } - return nil, nil + // Build the artifact and return it + artifact := &azcommon.Artifact{ + Resources: []string{b.config.ImageResourceID}, + BuilderIdValue: BuilderId, + } + + return artifact, nil } var _ packer.Builder = &Builder{} diff --git a/builder/azure/common/artifact.go b/builder/azure/common/artifact.go new file mode 100644 index 000000000..d74ff9b54 --- /dev/null +++ b/builder/azure/common/artifact.go @@ -0,0 +1,103 @@ +package common + +import ( + "context" + "fmt" + "log" + "sort" + "strings" + + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/packer/builder/azure/common/client" + "github.com/hashicorp/packer/packer" +) + +// Artifact is an artifact implementation that contains built Managed Images or Disks. +type Artifact struct { + // Array of the Azure resource IDs that were created. + Resources []string + + // BuilderId is the unique ID for the builder that created this AMI + BuilderIdValue string + + // Azure client for performing API stuff. 
+ AzureClientSet client.AzureClientSet +} + +func (a *Artifact) BuilderId() string { + return a.BuilderIdValue +} + +func (*Artifact) Files() []string { + // We have no files + return nil +} + +func (a *Artifact) Id() string { + parts := make([]string, 0, len(a.Resources)) + for _, resource := range a.Resources { + parts = append(parts, strings.ToLower(resource)) + } + + sort.Strings(parts) + return strings.Join(parts, ",") +} + +func (a *Artifact) String() string { + parts := make([]string, 0, len(a.Resources)) + for _, resource := range a.Resources { + parts = append(parts, strings.ToLower(resource)) + } + + sort.Strings(parts) + return fmt.Sprintf("Azure resources created:\n%s\n", strings.Join(parts, "\n")) +} + +func (a *Artifact) State(name string) interface{} { + switch name { + default: + return nil + } +} + +func (a *Artifact) Destroy() error { + errs := make([]error, 0) + + for _, resource := range a.Resources { + log.Printf("Deleting resource %s", resource) + + id, err := azure.ParseResourceID(resource) + if err != nil { + return fmt.Errorf("Unable to parse resource id (%s): %v", resource, err) + } + + ctx := context.TODO() + restype := strings.ToLower(fmt.Sprintf("%s/%s", id.Provider, id.ResourceType)) + + switch restype { + case "microsoft.compute/images": + res, err := a.AzureClientSet.ImagesClient().Delete(ctx, id.ResourceGroup, id.ResourceName) + if err != nil { + errs = append(errs, fmt.Errorf("Unable to initiate deletion of resource (%s): %v", resource, err)) + } else { + err := res.WaitForCompletionRef(ctx, a.AzureClientSet.PollClient()) + if err != nil { + errs = append(errs, fmt.Errorf("Unable to complete deletion of resource (%s): %v", resource, err)) + } + } + default: + errs = append(errs, fmt.Errorf("Don't know how to delete resources of type %s (%s)", resource, restype)) + } + + } + + if len(errs) > 0 { + if len(errs) == 1 { + return errs[0] + } else { + return &packer.MultiError{Errors: errs} + } + } + + return nil +} From 
ade95350d69dd086aea801cdd1d01fcade3b7692 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Mon, 7 Oct 2019 21:38:32 +0000 Subject: [PATCH 50/55] Use sudo in docs --- website/source/docs/builders/azure-chroot.html.md.erb | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index a6a52bc46..069fc1853 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -120,6 +120,13 @@ to fill in the subscription ID of the VM in any of the configuration options. ## Examples Here are some examples using this builder. +This builder requires privileged actions, such as mounting disks, running +`chroot` and other admin commands. Usually it needs to be run with root +permissions, for example: + +``` +sudo -E packer build example.json +``` ### Using a VM with a Managed Identity On a VM with a system-assigned managed identity that has the contributor role @@ -131,8 +138,6 @@ updated Debian image: "builders": [{ "type": "azure-chroot", - "command_wrapper": "sudo {{.Command}}", - "image_resource_id": "/subscriptions/{{vm `subscription_id`}}/resourceGroups/{{vm `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", "source": "credativ:Debian:9:latest" }], From d3dc1be71c62414d82f8f980fb1694195984dbf1 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Fri, 11 Oct 2019 15:32:02 +0000 Subject: [PATCH 51/55] Doc updates --- builder/azure/chroot/builder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/azure/chroot/builder.go b/builder/azure/chroot/builder.go index 27f763706..ee2dbe08f 100644 --- a/builder/azure/chroot/builder.go +++ b/builder/azure/chroot/builder.go @@ -1,7 +1,7 @@ //go:generate struct-markdown -// Package chroot is able to create an Azure manage image without requiring the -// launch of a new instance for every build. 
It does this by attaching and +// Package chroot is able to create an Azure managed image without requiring the +// launch of a new virtual machine for every build. It does this by attaching and // mounting the root disk and chrooting into that directory. // It then creates a managed image from that attached disk. package chroot From b716299d8ee578849e70a8d988a7b570e5672b3c Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 15 Oct 2019 21:13:07 +0000 Subject: [PATCH 52/55] PR comment --- builder/azure/chroot/diskattacher.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builder/azure/chroot/diskattacher.go b/builder/azure/chroot/diskattacher.go index 0aa833b6d..6dbdec73c 100644 --- a/builder/azure/chroot/diskattacher.go +++ b/builder/azure/chroot/diskattacher.go @@ -49,8 +49,7 @@ func (da diskAttacher) WaitForDevice(ctx context.Context, lun int32) (device str link, err := os.Readlink(path) if err == nil { return filepath.Abs("/dev/disk/azure/scsi1/" + link) - } - if err != nil && err != os.ErrNotExist { + } else if err != os.ErrNotExist { if pe, ok := err.(*os.PathError); ok && pe.Err != syscall.ENOENT { return "", err } From 015c1949867f772f54e322b27215ce98180d6f0b Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 15 Oct 2019 22:29:09 +0000 Subject: [PATCH 53/55] clarify docs --- .../source/docs/builders/azure-chroot.html.md.erb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/website/source/docs/builders/azure-chroot.html.md.erb b/website/source/docs/builders/azure-chroot.html.md.erb index 069fc1853..fd59e4fad 100644 --- a/website/source/docs/builders/azure-chroot.html.md.erb +++ b/website/source/docs/builders/azure-chroot.html.md.erb @@ -15,10 +15,11 @@ The `azure-chroot` builder is able to build Azure managed disk (MD) images. For more information on managed disks, see [Azure Managed Disks Overview](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/managed-disks-overview). 
The difference between this builder and the `azure-arm` builder is that this -builder is able to build a managed disk image without launching an Azure VM -instance. This can dramatically speed up image builds. It also allows for more -deterministic image content and enables some capabilities that are not possible -with the `azure-arm` builder. +builder is able to build a managed disk image without launching a new Azure VM +for every build, but instead use an already-running Azure VM. This can +dramatically speed up image builds. It also allows for more deterministic image +content and enables some capabilities that are not possible with the +`azure-arm` builder. > **This is an advanced builder** If you're just getting started with Packer, it is recommend to start with the [azure-arm builder](/docs/builders/azure-arm.html), @@ -57,7 +58,7 @@ information. <%= partial "partials/builder/azure/common/client/_Config-not-required.html" %> -### Azure chroot builder specific options +### Azure chroot builder specific options #### Required: <%= partial "partials/builder/azure/chroot/_Config-required.html" %> @@ -154,7 +155,7 @@ updated Debian image: ### Using a Service Principal Here is an example that creates a Debian image with updated packages. Specify -all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, +all environment variables (`ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, `ARM_SUBSCRIPTION_ID`) to use a service principal. The identity you choose should have permission to create disks and images and also to update your VM. @@ -175,7 +176,7 @@ subscription where the resulting image will be created. 
"client_id": "{{user `client_id`}}", "client_secret": "{{user `client_secret`}}", "subscription_id": "{{user `subscription_id`}}", - + "image_resource_id": "/subscriptions/{{user `subscription_id`}}/resourceGroups/{{user `resource_group`}}/providers/Microsoft.Compute/images/MyDebianOSImage-{{timestamp}}", "source": "credativ:Debian:9:latest" From 3b6c6f46e7b4d44be89a16e73305ff010594e8a1 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 15 Oct 2019 15:57:43 -0700 Subject: [PATCH 54/55] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 696a9e236..16fa1e637 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## 1.4.5 (Upcoming) ### IMPROVEMENTS: +* builder/azure-chroot: Add Azure chroot builder [GH-8185] * builder/oracle-oci: Support defined tags for oci builder [GH-8172] * builder/proxmos: Add ability to select CPU type [GH-8201] * builder/proxmox: Add support for SCSI controller selection [GH-8199] From 11a1221b538cc28ba4b08ec495dd0f83eed806a5 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 15 Oct 2019 16:26:35 -0700 Subject: [PATCH 55/55] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16fa1e637..9657e7047 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## 1.4.5 (Upcoming) ### IMPROVEMENTS: +* builder/azure-arm: Allow specification of polling duration [GH-8226] * builder/azure-chroot: Add Azure chroot builder [GH-8185] * builder/oracle-oci: Support defined tags for oci builder [GH-8172] * builder/proxmos: Add ability to select CPU type [GH-8201]