packer-cn/post-processor/googlecompute-import/post-processor.go


//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config

package googlecomputeimport

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"google.golang.org/api/compute/v1"
"google.golang.org/api/option"
"google.golang.org/api/storage/v1"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/packer/builder/googlecompute"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/post-processor/artifice"
"github.com/hashicorp/packer/post-processor/compress"
"github.com/hashicorp/packer/template/interpolate"
)
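
// Config is the configuration for the googlecompute-import post-processor.
// A minimal JSON template stanza, using only the required fields, might look
// like this (the values are illustrative, not defaults):
//
//	"post-processors": [
//	  {
//	    "type": "googlecompute-import",
//	    "account_file": "account.json",
//	    "project_id": "my-project",
//	    "bucket": "my-bucket",
//	    "image_name": "my-imported-image"
//	  }
//	]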
type Config struct {
common.PackerConfig `mapstructure:",squash"`
//The JSON file containing your account credentials.
//If specified, the account file will take precedence over any `googlecompute` builder authentication method.
AccountFile string `mapstructure:"account_file" required:"true"`
// This allows service account impersonation as per the [docs](https://cloud.google.com/iam/docs/impersonating-service-accounts).
ImpersonateServiceAccount string `mapstructure:"impersonate_service_account" required:"false"`
//The project ID where the GCS bucket exists and where the GCE image is stored.
ProjectId string `mapstructure:"project_id" required:"true"`
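	// IAP is not used by this post-processor; the field is skipped when
	// generating the HCL2 spec (see the struct tag below).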
IAP bool `mapstructure-to-hcl:",skip"`
//The name of the GCS bucket where the raw disk image will be uploaded.
Bucket string `mapstructure:"bucket" required:"true"`
//The name of the GCS object in `bucket` where
//the RAW disk image will be copied for import. This is treated as a
//[template engine](/docs/templates/engine). Therefore, you
//may use user variables and template functions in this field. Defaults to
//`packer-import-{{timestamp}}.tar.gz`.
GCSObjectName string `mapstructure:"gcs_object_name"`
//The description of the resulting image.
ImageDescription string `mapstructure:"image_description"`
//The name of the image family to which the resulting image belongs.
ImageFamily string `mapstructure:"image_family"`
//A list of features to enable on the guest operating system. Applicable only for bootable images. Valid
//values are `MULTI_IP_SUBNET`, `SECURE_BOOT`, `UEFI_COMPATIBLE`,
//`VIRTIO_SCSI_MULTIQUEUE` and `WINDOWS` currently.
ImageGuestOsFeatures []string `mapstructure:"image_guest_os_features"`
//Key/value pair labels to apply to the created image.
ImageLabels map[string]string `mapstructure:"image_labels"`
//The unique name of the resulting image.
ImageName string `mapstructure:"image_name" required:"true"`
//Skip removing the TAR file uploaded to the GCS
//bucket after the import process has completed. "true" means that we should
//leave it in the GCS bucket, "false" means to clean it out. Defaults to
//`false`.
SkipClean bool `mapstructure:"skip_clean"`
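	//Can be set instead of account_file. If set, this post-processor will use
	//HashiCorp Vault to generate an Oauth token for authenticating against
	//Google Cloud. The value should be the path of the token generator
	//within Vault.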
	VaultGCPOauthEngine string `mapstructure:"vault_gcp_oauth_engine"`

	account *googlecompute.ServiceAccount
	ctx     interpolate.Context
}
type PostProcessor struct {
config Config
}
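
// ConfigSpec returns the HCL2 object spec for this post-processor's
// configuration, generated from the Config struct by mapstructure-to-hcl2.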
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
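
// Configure decodes and interpolates the raw configuration, applies the
// default gcs_object_name, and validates required and mutually exclusive
// options.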
func (p *PostProcessor) Configure(raws ...interface{}) error {
err := config.Decode(&p.config, &config.DecodeOpts{
PluginType: BuilderId,
Interpolate: true,
InterpolateContext: &p.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"gcs_object_name",
},
},
}, raws...)
if err != nil {
return err
}
errs := new(packer.MultiError)
// Set defaults
if p.config.GCSObjectName == "" {
p.config.GCSObjectName = "packer-import-{{timestamp}}.tar.gz"
}
// Check and render gcs_object_name
if err = interpolate.Validate(p.config.GCSObjectName, &p.config.ctx); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error parsing gcs_object_name template: %s", err))
}
	if p.config.AccountFile != "" {
		if p.config.VaultGCPOauthEngine != "" && p.config.ImpersonateServiceAccount != "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("You cannot "+
				"specify impersonate_service_account, account_file and vault_gcp_oauth_engine at the same time"))
		}
cfg, err := googlecompute.ProcessAccountFile(p.config.AccountFile)
if err != nil {
errs = packer.MultiErrorAppend(errs, err)
}
p.config.account = cfg
}
templates := map[string]*string{
"bucket": &p.config.Bucket,
"image_name": &p.config.ImageName,
"project_id": &p.config.ProjectId,
}
for key, ptr := range templates {
if *ptr == "" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("%s must be set", key))
}
}
if len(errs.Errors) > 0 {
return errs
}
return nil
}
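
// PostProcess uploads the tar.gz produced by a compress or artifice
// post-processor to GCS, imports it as a GCE image, and, unless skip_clean
// is set, deletes the uploaded object afterwards.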
func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) {
generatedData := artifact.State("generated_data")
if generatedData == nil {
// Make sure it's not a nil map so we can assign to it later.
generatedData = make(map[string]interface{})
}
p.config.ctx.Data = generatedData
	var err error
	var opts option.ClientOption
	opts, err = googlecompute.NewClientOptionGoogle(p.config.account, p.config.VaultGCPOauthEngine, p.config.ImpersonateServiceAccount)
if err != nil {
return nil, false, false, err
}
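	// Only artifacts produced by the compress or artifice post-processors are
	// accepted: the importer expects a local tar.gz containing a raw disk.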
switch artifact.BuilderId() {
case compress.BuilderId, artifice.BuilderId:
break
default:
err := fmt.Errorf(
"Unknown artifact type: %s\nCan only import from Compress post-processor and Artifice post-processor artifacts.",
artifact.BuilderId())
return nil, false, false, err
}
p.config.GCSObjectName, err = interpolate.Render(p.config.GCSObjectName, &p.config.ctx)
if err != nil {
return nil, false, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err)
}
rawImageGcsPath, err := UploadToBucket(opts, ui, artifact, p.config.Bucket, p.config.GCSObjectName)
if err != nil {
return nil, false, false, err
}
gceImageArtifact, err := CreateGceImage(opts, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels, p.config.ImageGuestOsFeatures)
if err != nil {
return nil, false, false, err
}
if !p.config.SkipClean {
err = DeleteFromBucket(opts, ui, p.config.Bucket, p.config.GCSObjectName)
if err != nil {
return nil, false, false, err
}
}
return gceImageArtifact, false, false, nil
}
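
// UploadToBucket streams the artifact's local tar.gz file into the given GCS
// bucket under gcsObjectName and returns the created object's SelfLink URL.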
func UploadToBucket(opts option.ClientOption, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) {
service, err := storage.NewService(context.TODO(), opts)
if err != nil {
return "", err
}
ui.Say("Looking for tar.gz file in list of artifacts...")
source := ""
for _, path := range artifact.Files() {
ui.Say(fmt.Sprintf("Found artifact %v...", path))
if strings.HasSuffix(path, ".tar.gz") {
source = path
break
}
}
if source == "" {
return "", fmt.Errorf("No tar.gz file found in list of artifacts")
}
	artifactFile, err := os.Open(source)
	if err != nil {
		return "", fmt.Errorf("error opening %v: %s", source, err)
	}
ui.Say(fmt.Sprintf("Uploading file %v to GCS bucket %v/%v...", source, bucket, gcsObjectName))
storageObject, err := service.Objects.Insert(bucket, &storage.Object{Name: gcsObjectName}).Media(artifactFile).Do()
if err != nil {
		ui.Say(fmt.Sprintf("Failed to upload: %v", err))
return "", err
}
return storageObject.SelfLink, nil
}
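
// CreateGceImage registers the raw disk image stored at rawImageURL as a new
// GCE image in the given project and waits for the insert operation to
// finish.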
func CreateGceImage(opts option.ClientOption, ui packer.Ui, project string, rawImageURL string, imageName string, imageDescription string, imageFamily string, imageLabels map[string]string, imageGuestOsFeatures []string) (packer.Artifact, error) {
service, err := compute.NewService(context.TODO(), opts)
if err != nil {
return nil, err
}
	// Build up the list of guest OS features to apply to the image. Note the
	// zero length: appending to a slice created with a non-zero length would
	// leave nil entries at the front.
	imageFeatures := make([]*compute.GuestOsFeature, 0, len(imageGuestOsFeatures))
	for _, v := range imageGuestOsFeatures {
		imageFeatures = append(imageFeatures, &compute.GuestOsFeature{
			Type: v,
		})
	}
gceImage := &compute.Image{
Description: imageDescription,
Family: imageFamily,
GuestOsFeatures: imageFeatures,
Labels: imageLabels,
Name: imageName,
RawDisk: &compute.ImageRawDisk{Source: rawImageURL},
SourceType: "RAW",
}
ui.Say(fmt.Sprintf("Creating GCE image %v...", imageName))
op, err := service.Images.Insert(project, gceImage).Do()
if err != nil {
ui.Say("Error creating GCE image")
return nil, err
}
ui.Say("Waiting for GCE image creation operation to complete...")
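	// Poll the global operation until its status reaches DONE; any errors on
	// the operation are surfaced after the loop.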
for op.Status != "DONE" {
op, err = service.GlobalOperations.Get(project, op.Name).Do()
if err != nil {
return nil, err
}
time.Sleep(5 * time.Second)
}
	// Fail if the image creation operation finished with an error.
	if op.Error != nil {
		var imageError string
		for _, e := range op.Error.Errors {
			imageError += e.Message
		}
		err = fmt.Errorf("failed to create GCE image %s: %s", imageName, imageError)
		return nil, err
	}
return &Artifact{paths: []string{op.TargetLink}}, nil
}
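
// DeleteFromBucket removes the uploaded import source object from the GCS
// bucket after the image has been created.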
func DeleteFromBucket(opts option.ClientOption, ui packer.Ui, bucket string, gcsObjectName string) error {
service, err := storage.NewService(context.TODO(), opts)
if err != nil {
return err
}
ui.Say(fmt.Sprintf("Deleting import source from GCS %s/%s...", bucket, gcsObjectName))
err = service.Objects.Delete(bucket, gcsObjectName).Do()
if err != nil {
ui.Say(fmt.Sprintf("Failed to delete: %v/%v", bucket, gcsObjectName))
return err
}
return nil
}