Merge pull request #7222 from Adezandee/gce-post-processors

googlecompute-postprocessors: service account and export configs
Megan Marsh, 2019-01-31 16:58:42 -08:00 (committed via GitHub)
commit eef4fc7a01
5 changed files with 195 additions and 160 deletions

View File

@@ -34,7 +34,7 @@ type driverGCE struct {
var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
-func NewDriverGCE(ui packer.Ui, p string, a *AccountFile) (Driver, error) {
+func NewClientGCE(a *AccountFile) (*http.Client, error) {
var err error
var client *http.Client
@@ -78,6 +78,15 @@ func NewDriverGCE(ui packer.Ui, p string, a *AccountFile) (Driver, error) {
return nil, err
}
+return client, nil
+}
+func NewDriverGCE(ui packer.Ui, p string, a *AccountFile) (Driver, error) {
+client, err := NewClientGCE(a)
+if err != nil {
+return nil, err
+}
log.Printf("[INFO] Instantiating GCE client...")
service, err := compute.New(client)
if err != nil {
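The effect of this split is that the authorized HTTP client can now be built without constructing a full compute `Driver`. Below is a minimal sketch of how another package might reuse it, assuming the signatures shown in this diff (`NewClientGCE`, `ProcessAccountFile`) and the `google.golang.org/api/storage/v1` package; the `"account.json"` path is a placeholder.

```go
package main

import (
	"log"

	"github.com/hashicorp/packer/builder/googlecompute"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	// Load service-account credentials the same way the builder does.
	var account googlecompute.AccountFile
	if err := googlecompute.ProcessAccountFile(&account, "account.json"); err != nil {
		log.Fatal(err)
	}

	// NewClientGCE is the helper factored out above; it returns an
	// *http.Client authorized with the driver scopes.
	client, err := googlecompute.NewClientGCE(&account)
	if err != nil {
		log.Fatal(err)
	}

	// The same client can back other Google API services, e.g. Cloud Storage,
	// which is how the googlecompute-import post-processor below uses it.
	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("storage service ready: %T", svc)
}
```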

View File

@@ -15,9 +15,18 @@ import (
type Config struct {
common.PackerConfig `mapstructure:",squash"`
-Paths []string `mapstructure:"paths"`
-KeepOriginalImage bool `mapstructure:"keep_input_artifact"`
+AccountFile string `mapstructure:"account_file"`
+DiskSizeGb int64 `mapstructure:"disk_size"`
+DiskType string `mapstructure:"disk_type"`
+KeepOriginalImage bool `mapstructure:"keep_input_artifact"`
+MachineType string `mapstructure:"machine_type"`
+Network string `mapstructure:"network"`
+Paths []string `mapstructure:"paths"`
+Subnetwork string `mapstructure:"subnetwork"`
+Zone string `mapstructure:"zone"`
+Account googlecompute.AccountFile
ctx interpolate.Context
}
@@ -35,12 +44,38 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
return err
}
+errs := new(packer.MultiError)
+if len(p.config.Paths) == 0 {
+errs = packer.MultiErrorAppend(
+errs, fmt.Errorf("paths must be specified"))
+}
+// Set defaults.
+if p.config.DiskSizeGb == 0 {
+p.config.DiskSizeGb = 200
+}
+if p.config.DiskType == "" {
+p.config.DiskType = "pd-ssd"
+}
+if p.config.MachineType == "" {
+p.config.MachineType = "n1-highcpu-4"
+}
+if p.config.Network == "" && p.config.Subnetwork == "" {
+p.config.Network = "default"
+}
+if len(errs.Errors) > 0 {
+return errs
+}
return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
-ui.Say("Starting googlecompute-export...")
-ui.Say(fmt.Sprintf("Exporting image to destinations: %v", p.config.Paths))
if artifact.BuilderId() != googlecompute.BuilderId {
err := fmt.Errorf(
"Unknown artifact type: %s\nCan only export from Google Compute Engine builder artifacts.",
@@ -48,52 +83,62 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
return nil, p.config.KeepOriginalImage, err
}
-result := &Artifact{paths: p.config.Paths}
-if len(p.config.Paths) > 0 {
-accountKeyFilePath := artifact.State("AccountFilePath").(string)
-imageName := artifact.State("ImageName").(string)
-imageSizeGb := artifact.State("ImageSizeGb").(int64)
-projectId := artifact.State("ProjectId").(string)
-zone := artifact.State("BuildZone").(string)
-// Set up instance configuration.
-instanceName := fmt.Sprintf("%s-exporter", artifact.Id())
-metadata := map[string]string{
-"image_name": imageName,
-"name": instanceName,
-"paths": strings.Join(p.config.Paths, " "),
-"startup-script": StartupScript,
-"zone": zone,
+builderAccountFile := artifact.State("AccountFilePath").(string)
+builderImageName := artifact.State("ImageName").(string)
+builderProjectId := artifact.State("ProjectId").(string)
+builderZone := artifact.State("BuildZone").(string)
+ui.Say(fmt.Sprintf("Exporting image %v to destination: %v", builderImageName, p.config.Paths))
+if p.config.Zone == "" {
+p.config.Zone = builderZone
}
-exporterConfig := googlecompute.Config{
-InstanceName: instanceName,
-SourceImageProjectId: "debian-cloud",
-SourceImage: "debian-8-jessie-v20160629",
-DiskName: instanceName,
-DiskSizeGb: imageSizeGb + 10,
-DiskType: "pd-standard",
-Metadata: metadata,
-MachineType: "n1-standard-4",
-Zone: zone,
-Network: "default",
-RawStateTimeout: "5m",
-Scopes: []string{
-"https://www.googleapis.com/auth/userinfo.email",
-"https://www.googleapis.com/auth/compute",
-"https://www.googleapis.com/auth/devstorage.full_control",
-},
-}
-exporterConfig.CalcTimeout()
-// Set up credentials and GCE driver.
-if accountKeyFilePath != "" {
-err := googlecompute.ProcessAccountFile(&exporterConfig.Account, accountKeyFilePath)
+// Set up credentials for GCE driver.
+if builderAccountFile != "" {
+err := googlecompute.ProcessAccountFile(&p.config.Account, builderAccountFile)
if err != nil {
return nil, p.config.KeepOriginalImage, err
}
}
-driver, err := googlecompute.NewDriverGCE(ui, projectId, &exporterConfig.Account)
+if p.config.AccountFile != "" {
+err := googlecompute.ProcessAccountFile(&p.config.Account, p.config.AccountFile)
+if err != nil {
+return nil, p.config.KeepOriginalImage, err
+}
+}
+// Set up exporter instance configuration.
+exporterName := fmt.Sprintf("%s-exporter", artifact.Id())
+exporterMetadata := map[string]string{
+"image_name": builderImageName,
+"name": exporterName,
+"paths": strings.Join(p.config.Paths, " "),
+"startup-script": StartupScript,
+"zone": p.config.Zone,
+}
+exporterConfig := googlecompute.Config{
+DiskName: exporterName,
+DiskSizeGb: p.config.DiskSizeGb,
+DiskType: p.config.DiskType,
+InstanceName: exporterName,
+MachineType: p.config.MachineType,
+Metadata: exporterMetadata,
+Network: p.config.Network,
+RawStateTimeout: "5m",
+SourceImageFamily: "debian-9-worker",
+SourceImageProjectId: "compute-image-tools",
+Subnetwork: p.config.Subnetwork,
+Zone: p.config.Zone,
+Scopes: []string{
+"https://www.googleapis.com/auth/compute",
+"https://www.googleapis.com/auth/devstorage.full_control",
+"https://www.googleapis.com/auth/userinfo.email",
+},
+}
+exporterConfig.CalcTimeout()
+driver, err := googlecompute.NewDriverGCE(ui, builderProjectId, &p.config.Account)
if err != nil {
return nil, p.config.KeepOriginalImage, err
}
@@ -120,7 +165,8 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
// Run the steps.
p.runner = common.NewRunner(steps, p.config.PackerConfig, ui)
p.runner.Run(state)
-}
+result := &Artifact{paths: p.config.Paths}
return result, p.config.KeepOriginalImage, nil
}
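A hypothetical test (not part of this PR) that exercises the `Configure` changes above: `paths` is now required, and the new disk, machine-type, and network options fall back to the defaults shown in the diff when a template omits them. The package name `googlecomputeexport` comes from the startup-script file below.

```go
package googlecomputeexport

import "testing"

func TestConfigureDefaults(t *testing.T) {
	var p PostProcessor

	// paths is required.
	if err := p.Configure(map[string]interface{}{}); err == nil {
		t.Fatal("expected an error when paths is not specified")
	}

	// With only paths set, the optional fields pick up their defaults.
	p = PostProcessor{}
	if err := p.Configure(map[string]interface{}{
		"paths": []string{"gs://my-bucket/my-image.tar.gz"},
	}); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if p.config.DiskSizeGb != 200 || p.config.DiskType != "pd-ssd" {
		t.Errorf("unexpected disk defaults: %d %q", p.config.DiskSizeGb, p.config.DiskType)
	}
	if p.config.MachineType != "n1-highcpu-4" || p.config.Network != "default" {
		t.Errorf("unexpected instance defaults: %q %q", p.config.MachineType, p.config.Network)
	}
}
```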

View File

@@ -1,6 +1,6 @@
package googlecomputeexport
-var StartupScript string = `#!/bin/sh
+var StartupScript string = `#!/bin/bash
GetMetadata () {
echo "$(curl -f -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/attributes/$1 2> /dev/null)"
@@ -8,11 +8,11 @@ GetMetadata () {
IMAGENAME=$(GetMetadata image_name)
NAME=$(GetMetadata name)
DISKNAME=${NAME}-toexport
-PATHS=$(GetMetadata paths)
+PATHS=($(GetMetadata paths))
ZONE=$(GetMetadata zone)
Exit () {
-for i in ${PATHS}; do
+for i in ${PATHS[@]}; do
LOGDEST="${i}.exporter.log"
echo "Uploading exporter log to ${LOGDEST}..."
gsutil -h "Content-Type:text/plain" cp /var/log/daemon.log ${LOGDEST}
@@ -40,17 +40,15 @@ if ! gcloud compute instances attach-disk ${NAME} --disk ${DISKNAME} --device-na
Exit 1
fi
-echo "Dumping disk..."
-if ! dd if=/dev/disk/by-id/google-toexport of=disk.raw bs=4096 conv=sparse; then
-echo "Failed to dump disk to image."
+echo "GCEExport: Running export tool."
+gce_export -gcs_path "${PATHS[0]}" -disk /dev/disk/by-id/google-toexport -y
+if [ $? -ne 0 ]; then
+echo "ExportFailed: Failed to export disk source to ${PATHS[0]}."
Exit 1
fi
-echo "Compressing and tar'ing disk image..."
-if ! tar -czf root.tar.gz disk.raw; then
-echo "Failed to tar disk image."
-Exit 1
-fi
+echo "ExportSuccess"
+sync
echo "Detaching disk..."
if ! gcloud compute instances detach-disk ${NAME} --disk ${DISKNAME} --zone ${ZONE}; then
@@ -64,10 +62,10 @@ if ! gcloud compute disks delete ${DISKNAME} --zone ${ZONE}; then
FAIL=1
fi
-for i in ${PATHS}; do
-echo "Uploading tar'ed disk image to ${i}..."
-if ! gsutil -o GSUtil:parallel_composite_upload_threshold=100M cp root.tar.gz ${i}; then
-echo "Failed to upload image to ${i}."
+for i in ${PATHS[@]:1}; do
+echo "Copying archive image to ${i}..."
+if ! gsutil -o GSUtil:parallel_composite_upload_threshold=100M cp ${PATHS[0]} ${i}; then
+echo "Failed to copy image to ${i}."
FAIL=1
fi
done

View File

@@ -13,13 +13,9 @@ import (
"github.com/hashicorp/packer/builder/googlecompute"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/helper/config"
-"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/post-processor/compress"
"github.com/hashicorp/packer/template/interpolate"
-"golang.org/x/oauth2"
-"golang.org/x/oauth2/jwt"
)
type Config struct {
@@ -38,12 +34,12 @@ type Config struct {
KeepOriginalImage bool `mapstructure:"keep_input_artifact"`
SkipClean bool `mapstructure:"skip_clean"`
+Account googlecompute.AccountFile
ctx interpolate.Context
}
type PostProcessor struct {
config Config
-runner multistep.Runner
}
func (p *PostProcessor) Configure(raws ...interface{}) error {
@@ -60,24 +56,29 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
return err
}
-errs := new(packer.MultiError)
// Set defaults
if p.config.GCSObjectName == "" {
p.config.GCSObjectName = "packer-import-{{timestamp}}.tar.gz"
}
+errs := new(packer.MultiError)
// Check and render gcs_object_name
if err = interpolate.Validate(p.config.GCSObjectName, &p.config.ctx); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error parsing gcs_object_name template: %s", err))
}
+if p.config.AccountFile != "" {
+if err := googlecompute.ProcessAccountFile(&p.config.Account, p.config.AccountFile); err != nil {
+errs = packer.MultiErrorAppend(errs, err)
+}
+}
templates := map[string]*string{
"bucket": &p.config.Bucket,
"image_name": &p.config.ImageName,
"project_id": &p.config.ProjectId,
-"account_file": &p.config.AccountFile,
}
for key, ptr := range templates {
if *ptr == "" {
@@ -94,7 +95,10 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
-var err error
+client, err := googlecompute.NewClientGCE(&p.config.Account)
+if err != nil {
+return nil, false, err
+}
if artifact.BuilderId() != compress.BuilderId {
err = fmt.Errorf(
@@ -108,18 +112,18 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
return nil, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err)
}
-rawImageGcsPath, err := UploadToBucket(p.config.AccountFile, ui, artifact, p.config.Bucket, p.config.GCSObjectName)
+rawImageGcsPath, err := UploadToBucket(client, ui, artifact, p.config.Bucket, p.config.GCSObjectName)
if err != nil {
return nil, p.config.KeepOriginalImage, err
}
-gceImageArtifact, err := CreateGceImage(p.config.AccountFile, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels, p.config.ImageGuestOsFeatures)
+gceImageArtifact, err := CreateGceImage(client, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels, p.config.ImageGuestOsFeatures)
if err != nil {
return nil, p.config.KeepOriginalImage, err
}
if !p.config.SkipClean {
-err = DeleteFromBucket(p.config.AccountFile, ui, p.config.Bucket, p.config.GCSObjectName)
+err = DeleteFromBucket(client, ui, p.config.Bucket, p.config.GCSObjectName)
if err != nil {
return nil, p.config.KeepOriginalImage, err
}
@@ -128,24 +132,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
return gceImageArtifact, p.config.KeepOriginalImage, nil
}
-func UploadToBucket(accountFile string, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) {
-var client *http.Client
-var account googlecompute.AccountFile
-err := googlecompute.ProcessAccountFile(&account, accountFile)
-if err != nil {
-return "", err
-}
-var DriverScopes = []string{"https://www.googleapis.com/auth/devstorage.full_control"}
-conf := jwt.Config{
-Email: account.ClientEmail,
-PrivateKey: []byte(account.PrivateKey),
-Scopes: DriverScopes,
-TokenURL: "https://accounts.google.com/o/oauth2/token",
-}
-client = conf.Client(oauth2.NoContext)
+func UploadToBucket(client *http.Client, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) {
service, err := storage.New(client)
if err != nil {
return "", err
@@ -162,7 +149,7 @@ func UploadToBucket(accountFile string, ui packer.Ui, artifact packer.Artifact,
}
if source == "" {
-return "", fmt.Errorf("No tar.gz file found in list of articats")
+return "", fmt.Errorf("No tar.gz file found in list of artifacts")
}
artifactFile, err := os.Open(source)
@@ -178,28 +165,10 @@ func UploadToBucket(accountFile string, ui packer.Ui, artifact packer.Artifact,
return "", err
}
-return "https://storage.googleapis.com/" + bucket + "/" + gcsObjectName, nil
+return storageObject.SelfLink, nil
}
-func CreateGceImage(accountFile string, ui packer.Ui, project string, rawImageURL string, imageName string, imageDescription string, imageFamily string, imageLabels map[string]string, imageGuestOsFeatures []string) (packer.Artifact, error) {
-var client *http.Client
-var account googlecompute.AccountFile
-err := googlecompute.ProcessAccountFile(&account, accountFile)
-if err != nil {
-return nil, err
-}
-var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
-conf := jwt.Config{
-Email: account.ClientEmail,
-PrivateKey: []byte(account.PrivateKey),
-Scopes: DriverScopes,
-TokenURL: "https://accounts.google.com/o/oauth2/token",
-}
-client = conf.Client(oauth2.NoContext)
+func CreateGceImage(client *http.Client, ui packer.Ui, project string, rawImageURL string, imageName string, imageDescription string, imageFamily string, imageLabels map[string]string, imageGuestOsFeatures []string) (packer.Artifact, error) {
service, err := compute.New(client)
if err != nil {
return nil, err
@@ -253,24 +222,7 @@ func CreateGceImage(accountFile string, ui packer.Ui, project string, rawImageUR
return &Artifact{paths: []string{op.TargetLink}}, nil
}
-func DeleteFromBucket(accountFile string, ui packer.Ui, bucket string, gcsObjectName string) error {
-var client *http.Client
-var account googlecompute.AccountFile
-err := googlecompute.ProcessAccountFile(&account, accountFile)
-if err != nil {
-return err
-}
-var DriverScopes = []string{"https://www.googleapis.com/auth/devstorage.full_control"}
-conf := jwt.Config{
-Email: account.ClientEmail,
-PrivateKey: []byte(account.PrivateKey),
-Scopes: DriverScopes,
-TokenURL: "https://accounts.google.com/o/oauth2/token",
-}
-client = conf.Client(oauth2.NoContext)
+func DeleteFromBucket(client *http.Client, ui packer.Ui, bucket string, gcsObjectName string) error {
service, err := storage.New(client)
if err != nil {
return err
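A similarly hypothetical test (also not part of this PR) for the import post-processor's `Configure` changes: since `account_file` was dropped from the required-templates map and is only processed when non-empty, a template with just `bucket`, `image_name`, and `project_id` should now validate. The package name `googlecomputeimport` is an assumption here.

```go
package googlecomputeimport

import "testing"

func TestConfigureAccountFileOptional(t *testing.T) {
	var p PostProcessor
	err := p.Configure(map[string]interface{}{
		"bucket":     "my-bucket",
		"image_name": "my-image",
		"project_id": "my-project",
	})
	if err != nil {
		t.Fatalf("expected account_file to be optional, got: %s", err)
	}
	// The gcs_object_name default from Configure should also be in place.
	if p.config.GCSObjectName == "" {
		t.Error("expected a default gcs_object_name to be set")
	}
}
```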

View File

@@ -34,9 +34,39 @@ permissions to the GCS `paths`.
### Optional
+- `account_file` (string) - The JSON file containing your account
+  credentials. If specified, this takes precedence over the `googlecompute`
+  builder authentication method.
+- `disk_size` (number) - The size of the export instance's disk. The disk
+  is unused for the export itself, but a larger size increases `pd-ssd`
+  read speed. Defaults to `200`, which is 200GB.
+- `disk_type` (string) - Type of disk used to back the export instance,
+  like `pd-ssd` or `pd-standard`. Defaults to `pd-ssd`.
- `keep_input_artifact` (boolean) - If true, do not delete the Google Compute
  Engine (GCE) image being exported.
+- `machine_type` (string) - The export instance machine type. Defaults
+  to `"n1-highcpu-4"`.
+- `network` (string) - The Google Compute network id or URL to use for the
+  export instance. Defaults to `"default"`. If the value is not a URL, it
+  will be interpolated to
+  `projects/((network_project_id))/global/networks/((network))`. This value
+  is not required if a `subnetwork` is specified.
+- `subnetwork` (string) - The Google Compute subnetwork id or URL to use for
+  the export instance. Only required if the `network` has been created with
+  custom subnetting. Note that the region of the subnetwork must match the
+  `zone` in which the VM is launched. If the value is not a URL, it will be
+  interpolated to
+  `projects/((network_project_id))/regions/((region))/subnetworks/((subnetwork))`.
+- `zone` (string) - The zone in which to launch the export instance. Defaults
+  to the `googlecompute` builder zone. Example: `"us-central1-a"`.
## Basic Example
The following example builds a GCE image in the project, `my-project`, with an