// packer-cn/builder/parallels/iso/builder.go
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config

package iso

import (
"context"
"errors"
"fmt"
"github.com/hashicorp/hcl/v2/hcldec"
parallelscommon "github.com/hashicorp/packer/builder/parallels/common"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/bootcommand"
"github.com/hashicorp/packer/common/shutdowncommand"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/helper/multistep"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
)
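
// BuilderId uniquely identifies this builder.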
const BuilderId = "rickard-von-essen.parallels"
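
// Builder builds Parallels Desktop virtual machines from an ISO image.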
type Builder struct {
config Config
runner multistep.Runner
}
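
// Config is the user-facing configuration for this builder, combining the
// shared Packer and Parallels option blocks with the ISO-specific settings
// declared below.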
type Config struct {
common.PackerConfig `mapstructure:",squash"`
common.HTTPConfig `mapstructure:",squash"`
common.ISOConfig `mapstructure:",squash"`
common.FloppyConfig `mapstructure:",squash"`
bootcommand.BootConfig `mapstructure:",squash"`
parallelscommon.OutputConfig `mapstructure:",squash"`
parallelscommon.HWConfig `mapstructure:",squash"`
parallelscommon.PrlctlConfig `mapstructure:",squash"`
parallelscommon.PrlctlPostConfig `mapstructure:",squash"`
parallelscommon.PrlctlVersionConfig `mapstructure:",squash"`
shutdowncommand.ShutdownConfig `mapstructure:",squash"`
parallelscommon.SSHConfig `mapstructure:",squash"`
parallelscommon.ToolsConfig `mapstructure:",squash"`
// The size, in megabytes, of the hard disk to create
// for the VM. By default, this is 40000 (about 40 GB).
DiskSize uint `mapstructure:"disk_size" required:"false"`
// The type of image file backing the virtual disk drives,
// defaults to expand. Valid options are expand (expanding disk), where the
// image file starts small and grows as you add data to it, and plain (plain
// disk), where the image file has a fixed size from the moment it is created
// (i.e. the space for the full drive is allocated up front). Plain disks
// perform faster than expanding disks. skip_compaction will be set to true
// automatically for plain disks.
DiskType string `mapstructure:"disk_type" required:"false"`
// The guest OS type being installed. By default
// this is "other", but you can get dramatic performance improvements by
// setting this to the proper value. To view all available values for this, run
// prlctl create x --distribution list. Setting the correct value hints to
// Parallels Desktop how to optimize the virtual hardware to work best with
// that operating system.
GuestOSType string `mapstructure:"guest_os_type" required:"false"`
// The type of controller that the hard
// drives are attached to, defaults to "sata". Valid options are "sata", "ide",
// and "scsi".
HardDriveInterface string `mapstructure:"hard_drive_interface" required:"false"`
// A list of which interfaces on the
// host should be searched for an IP address. The first IP address found on one
// of these will be used as {{ .HTTPIP }} in the boot_command. Defaults to
// ["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
// "ppp0", "ppp1", "ppp2"].
HostInterfaces []string `mapstructure:"host_interfaces" required:"false"`
// The virtual disk image is compacted at the end of
// the build process using the prl_disk_tool utility (unless disk_type is set
// to plain). In certain rare cases, this might corrupt
// the resulting disk image. If you find this to be the case, you can disable
// compaction using this configuration value.
SkipCompaction bool `mapstructure:"skip_compaction" required:"false"`
// This is the name of the PVM directory for the new
// virtual machine, without the file extension. By default this is
// "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
VMName string `mapstructure:"vm_name" required:"false"`
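// ctx is the interpolation context used when rendering templated
// configuration values.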
ctx interpolate.Context
}
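
// ConfigSpec returns the HCL2 object spec derived from Config, which lets
// Packer core decode HCL2 templates for this builder.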
func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }
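
// Prepare decodes the raw configuration, applies defaults, and validates the
// result, returning any warnings and errors encountered.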
func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
err := config.Decode(&b.config, &config.DecodeOpts{
Interpolate: true,
InterpolateContext: &b.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"boot_command",
"prlctl",
"prlctl_post",
"parallels_tools_guest_path",
},
},
}, raws...)
if err != nil {
return nil, nil, err
}
// Accumulate any errors and warnings
var errs *packer.MultiError
warnings := make([]string, 0)
isoWarnings, isoErrs := b.config.ISOConfig.Prepare(&b.config.ctx)
warnings = append(warnings, isoWarnings...)
errs = packer.MultiErrorAppend(errs, isoErrs...)
errs = packer.MultiErrorAppend(errs, b.config.HTTPConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(
errs, b.config.OutputConfig.Prepare(&b.config.ctx, &b.config.PackerConfig)...)
errs = packer.MultiErrorAppend(errs, b.config.HWConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.PrlctlPostConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(&b.config.ctx)...)
errs = packer.MultiErrorAppend(errs, b.config.BootConfig.Prepare(&b.config.ctx)...)
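// Apply defaults for any values the user did not set.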
if b.config.DiskSize == 0 {
b.config.DiskSize = 40000
}
if b.config.DiskType == "" {
b.config.DiskType = "expand"
}
if b.config.HardDriveInterface == "" {
b.config.HardDriveInterface = "sata"
}
if b.config.GuestOSType == "" {
b.config.GuestOSType = "other"
}
if len(b.config.HostInterfaces) == 0 {
b.config.HostInterfaces = []string{"en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7",
"en8", "en9", "ppp0", "ppp1", "ppp2"}
}
if b.config.VMName == "" {
b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName)
}
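// Validate the disk type and hard drive interface settings.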
if b.config.DiskType != "expand" && b.config.DiskType != "plain" {
errs = packer.MultiErrorAppend(
errs, errors.New("disk_type can only be expand or plain"))
}
if b.config.DiskType == "plain" && !b.config.SkipCompaction {
b.config.SkipCompaction = true
warnings = append(warnings,
"'skip_compaction' is set to true automatically for plain disks.")
}
if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" {
errs = packer.MultiErrorAppend(
errs, errors.New("hard_drive_interface can only be ide, sata, or scsi"))
}
// Warnings
if b.config.ShutdownCommand == "" {
warnings = append(warnings,
"A shutdown_command was not specified. Without a shutdown command, Packer\n"+
"will forcibly halt the virtual machine, which may result in data loss.")
}
if errs != nil && len(errs.Errors) > 0 {
return nil, warnings, errs
}
return nil, warnings, nil
}
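
// Run executes the build: it drives Parallels Desktop through the steps below
// and returns the resulting virtual machine artifact.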
func (b *Builder) Run(ctx context.Context, ui packer.Ui, hook packer.Hook) (packer.Artifact, error) {
// Create the driver that we'll use to communicate with Parallels
driver, err := parallelscommon.NewDriver()
if err != nil {
return nil, fmt.Errorf("Failed creating Parallels driver: %s", err)
}
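// The steps below download the ISO, create and configure the VM, boot it and
// type the boot command, provision it over the communicator, then shut the VM
// down and optionally compact its disk.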
steps := []multistep.Step{
&parallelscommon.StepPrepareParallelsTools{
ParallelsToolsFlavor: b.config.ParallelsToolsFlavor,
ParallelsToolsMode: b.config.ParallelsToolsMode,
},
&common.StepDownload{
Checksum: b.config.ISOChecksum,
ChecksumType: b.config.ISOChecksumType,
Description: "ISO",
Extension: b.config.TargetExtension,
ResultKey: "iso_path",
TargetPath: b.config.TargetPath,
Url: b.config.ISOUrls,
},
&parallelscommon.StepOutputDir{
Force: b.config.PackerForce,
Path: b.config.OutputDir,
},
&common.StepCreateFloppy{
Files: b.config.FloppyConfig.FloppyFiles,
Directories: b.config.FloppyConfig.FloppyDirectories,
Label: b.config.FloppyConfig.FloppyLabel,
},
&common.StepHTTPServer{
HTTPDir: b.config.HTTPDir,
HTTPPortMin: b.config.HTTPPortMin,
HTTPPortMax: b.config.HTTPPortMax,
},
new(stepCreateVM),
new(stepCreateDisk),
new(stepSetBootOrder),
new(stepAttachISO),
&parallelscommon.StepAttachParallelsTools{
ParallelsToolsMode: b.config.ParallelsToolsMode,
},
new(parallelscommon.StepAttachFloppy),
&parallelscommon.StepPrlctl{
Commands: b.config.Prlctl,
Ctx: b.config.ctx,
},
&parallelscommon.StepRun{},
&parallelscommon.StepTypeBootCommand{
BootWait: b.config.BootWait,
BootCommand: b.config.FlatBootCommand(),
HostInterfaces: b.config.HostInterfaces,
VMName: b.config.VMName,
Ctx: b.config.ctx,
GroupInterval: b.config.BootConfig.BootGroupInterval,
},
&communicator.StepConnect{
Config: &b.config.SSHConfig.Comm,
Host: parallelscommon.CommHost(b.config.SSHConfig.Comm.SSHHost),
SSHConfig: b.config.SSHConfig.Comm.SSHConfigFunc(),
},
&parallelscommon.StepUploadVersion{
Path: b.config.PrlctlVersionFile,
},
&parallelscommon.StepUploadParallelsTools{
ParallelsToolsFlavor: b.config.ParallelsToolsFlavor,
ParallelsToolsGuestPath: b.config.ParallelsToolsGuestPath,
ParallelsToolsMode: b.config.ParallelsToolsMode,
Ctx: b.config.ctx,
},
new(common.StepProvision),
&common.StepCleanupTempKeys{
Comm: &b.config.SSHConfig.Comm,
},
&parallelscommon.StepShutdown{
Command: b.config.ShutdownCommand,
Timeout: b.config.ShutdownTimeout,
},
&parallelscommon.StepPrlctl{
Commands: b.config.PrlctlPost,
Ctx: b.config.ctx,
},
&parallelscommon.StepCompactDisk{
Skip: b.config.SkipCompaction,
},
}
// Setup the state bag
state := new(multistep.BasicStateBag)
state.Put("config", &b.config)
state.Put("debug", b.config.PackerDebug)
state.Put("driver", driver)
state.Put("hook", hook)
state.Put("ui", ui)
// Run
b.runner = common.NewRunnerWithPauseFn(steps, b.config.PackerConfig, ui, state)
b.runner.Run(ctx, state)
// If there was an error, return that
if rawErr, ok := state.GetOk("error"); ok {
return nil, rawErr.(error)
}
// If we were interrupted or cancelled, then just exit.
if _, ok := state.GetOk(multistep.StateCancelled); ok {
return nil, errors.New("Build was cancelled.")
}
if _, ok := state.GetOk(multistep.StateHalted); ok {
return nil, errors.New("Build was halted.")
}
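// Expose any data generated by the build steps through the artifact.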
generatedData := map[string]interface{}{"generated_data": state.Get("generated_data")}
return parallelscommon.NewArtifact(b.config.OutputDir, generatedData)
}