packer-cn/post-processor/googlecompute-export/post-processor.go


//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config

package googlecomputeexport

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/packer/builder/googlecompute"
	"github.com/hashicorp/packer/helper/communicator"
	"github.com/hashicorp/packer/helper/config"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/packer-plugin-sdk/common"
	"github.com/hashicorp/packer/packer-plugin-sdk/multistep"
	"github.com/hashicorp/packer/packer-plugin-sdk/multistep/commonsteps"
	"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
	"github.com/hashicorp/packer/post-processor/artifice"
)
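
// An illustrative HCL2 usage snippet for this post-processor; the bucket,
// object path and zone below are placeholders, not defaults:
//
//	post-processor "googlecompute-export" {
//	  paths = ["gs://mybucket/exports/my-image.tar.gz"]
//	  zone  = "us-central1-a"
//	}
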
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	//The JSON file containing your account credentials.
	//If specified, the account file will take precedence over any `googlecompute` builder authentication method.
	AccountFile string `mapstructure:"account_file"`
	// This allows service account impersonation as per the [docs](https://cloud.google.com/iam/docs/impersonating-service-accounts).
	ImpersonateServiceAccount string `mapstructure:"impersonate_service_account" required:"false"`
	//The size of the export instance's disk.
	//The disk is unused for the export but a larger size will increase `pd-ssd` read speed.
	//This defaults to `200`, which is 200GB.
	DiskSizeGb int64 `mapstructure:"disk_size"`
	//Type of disk used to back the export instance, like
	//`pd-ssd` or `pd-standard`. Defaults to `pd-ssd`.
	DiskType string `mapstructure:"disk_type"`
	//The export instance machine type. Defaults to `"n1-highcpu-4"`.
	MachineType string `mapstructure:"machine_type"`
	//The Google Compute network id or URL to use for the export instance.
	//Defaults to `"default"`. If the value is not a URL, it
	//will be interpolated to `projects/((builder_project_id))/global/networks/((network))`.
	//This value is not required if a `subnet` is specified.
	Network string `mapstructure:"network"`
	//A list of GCS paths where the image will be exported.
	//For example `'gs://mybucket/path/to/file.tar.gz'`.
	Paths []string `mapstructure:"paths" required:"true"`
	//The Google Compute subnetwork id or URL to use for
	//the export instance. Only required if the `network` has been created with
	//custom subnetting. Note, the region of the subnetwork must match the
	//`zone` in which the VM is launched. If the value is not a URL,
	//it will be interpolated to
	//`projects/((builder_project_id))/regions/((region))/subnetworks/((subnetwork))`.
	Subnetwork string `mapstructure:"subnetwork"`
	//The zone in which to launch the export instance. Defaults
	//to the `googlecompute` builder zone. Example: `"us-central1-a"`.
	Zone string `mapstructure:"zone"`

	IAP                 bool   `mapstructure-to-hcl2:",skip"`
	VaultGCPOauthEngine string `mapstructure:"vault_gcp_oauth_engine"`
	ServiceAccountEmail string `mapstructure:"service_account_email"`

	account *googlecompute.ServiceAccount
	ctx     interpolate.Context
}
type PostProcessor struct {
	config Config
	runner multistep.Runner
}
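
// ConfigSpec returns the HCL2 object spec generated from Config, which lets
// Packer decode HCL2 templates for this post-processor.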
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
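
// Configure decodes and interpolates the raw configuration, fills in defaults
// (disk size, disk type, machine type, network) and validates required and
// mutually exclusive options.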
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		PluginType:         BuilderId,
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
	}, raws...)
	if err != nil {
		return err
	}

	errs := new(packer.MultiError)
	if len(p.config.Paths) == 0 {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("paths must be specified"))
	}

	// Set defaults.
	if p.config.DiskSizeGb == 0 {
		p.config.DiskSizeGb = 200
	}
	if p.config.DiskType == "" {
		p.config.DiskType = "pd-ssd"
	}
	if p.config.MachineType == "" {
		p.config.MachineType = "n1-highcpu-4"
	}
	if p.config.Network == "" && p.config.Subnetwork == "" {
		p.config.Network = "default"
	}

	if p.config.AccountFile != "" && p.config.VaultGCPOauthEngine != "" {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("May set either account_file or "+
				"vault_gcp_oauth_engine, but not both."))
	}

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
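
// PostProcess launches a temporary exporter instance in GCE whose startup
// script writes the built image to the configured GCS paths, waits for that
// script to finish, tears the instance down, and returns an artifact listing
// those paths.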
func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) {
	switch artifact.BuilderId() {
	case googlecompute.BuilderId, artifice.BuilderId:
		break
	default:
		err := fmt.Errorf(
			"Unknown artifact type: %s\nCan only export from Google Compute Engine builder and Artifice post-processor artifacts.",
			artifact.BuilderId())
		return nil, false, false, err
	}

	builderAccountFile := artifact.State("AccountFilePath").(string)
	builderImageName := artifact.State("ImageName").(string)
	builderProjectId := artifact.State("ProjectId").(string)
	builderZone := artifact.State("BuildZone").(string)

	ui.Say(fmt.Sprintf("Exporting image %v to destination: %v", builderImageName, p.config.Paths))

	if p.config.Zone == "" {
		p.config.Zone = builderZone
	}
	// Set up credentials for GCE driver.
	if builderAccountFile != "" {
		cfg, err := googlecompute.ProcessAccountFile(builderAccountFile)
		if err != nil {
			return nil, false, false, err
		}
		p.config.account = cfg
	}
	if p.config.AccountFile != "" {
		cfg, err := googlecompute.ProcessAccountFile(p.config.AccountFile)
		if err != nil {
			return nil, false, false, err
		}
		p.config.account = cfg
	}
	// Set up exporter instance configuration.
	exporterName := fmt.Sprintf("%s-exporter", artifact.Id())
	exporterMetadata := map[string]string{
		"image_name":     builderImageName,
		"name":           exporterName,
		"paths":          strings.Join(p.config.Paths, " "),
		"startup-script": StartupScript,
		"zone":           p.config.Zone,
	}
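
	// The metadata above is exposed to the exporter VM; GCE runs the
	// "startup-script" value at boot, and that script performs the actual
	// export of the image to the GCS paths listed under "paths".
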
	exporterConfig := googlecompute.Config{
		DiskName:             exporterName,
		DiskSizeGb:           p.config.DiskSizeGb,
		DiskType:             p.config.DiskType,
		InstanceName:         exporterName,
		MachineType:          p.config.MachineType,
		Metadata:             exporterMetadata,
		Network:              p.config.Network,
		NetworkProjectId:     builderProjectId,
		StateTimeout:         5 * time.Minute,
		SourceImageFamily:    "debian-9-worker",
		SourceImageProjectId: []string{"compute-image-tools"},
		Subnetwork:           p.config.Subnetwork,
		Zone:                 p.config.Zone,
		Scopes: []string{
			"https://www.googleapis.com/auth/compute",
			"https://www.googleapis.com/auth/devstorage.full_control",
			"https://www.googleapis.com/auth/userinfo.email",
		},
	}
	if p.config.ServiceAccountEmail != "" {
		exporterConfig.ServiceAccountEmail = p.config.ServiceAccountEmail
	}
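
	// Driver credentials come from the resolved account file (the builder's or
	// this post-processor's), service account impersonation, or the Vault GCP
	// OAuth engine, whichever was configured.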
	cfg := googlecompute.GCEDriverConfig{
		Ui:                            ui,
		ProjectId:                     builderProjectId,
		Account:                       p.config.account,
		ImpersonateServiceAccountName: p.config.ImpersonateServiceAccount,
		VaultOauthEngineName:          p.config.VaultGCPOauthEngine,
	}

	driver, err := googlecompute.NewDriverGCE(cfg)
	if err != nil {
		return nil, false, false, err
	}
	// Set up the state.
	state := new(multistep.BasicStateBag)
	state.Put("config", &exporterConfig)
	state.Put("driver", driver)
	state.Put("ui", ui)

	// Build the steps.
	steps := []multistep.Step{
		&communicator.StepSSHKeyGen{
			CommConf: &exporterConfig.Comm,
		},
		multistep.If(p.config.PackerDebug,
			&communicator.StepDumpSSHKey{
				Path: fmt.Sprintf("gce_%s.pem", p.config.PackerBuildName),
			},
		),
		&googlecompute.StepCreateInstance{
			Debug: p.config.PackerDebug,
		},
		new(googlecompute.StepWaitStartupScript),
		new(googlecompute.StepTeardownInstance),
	}
	// Run the steps.
	p.runner = commonsteps.NewRunner(steps, p.config.PackerConfig, ui)
	p.runner.Run(ctx, state)

	result := &Artifact{paths: p.config.Paths}

	return result, false, false, nil
}