packer-cn/packer/rpc/post_processor.go
Adrien Delorme 0785c2f6fc
build using HCL2 (#8423)
This follows #8232, which added the code generation needed to parse
HCL files for each Packer component.

All existing Packer config files keep working as before. Packer takes one
argument. When a directory is passed, every file in it whose name ends with
“.pkr.hcl” or “.pkr.json” is parsed using the HCL2 format. When a single file
ending with “.pkr.hcl” or “.pkr.json” is passed, it is parsed using the HCL2
format. In every other case, the old Packer style is used.
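
As a rough illustration of that dispatch rule, here is a minimal sketch; it is not Packer's actual code, and `useHCL2Parser` is a hypothetical helper:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// useHCL2Parser is a hypothetical helper mirroring the rule above: HCL2 for
// directories and for files ending in .pkr.hcl or .pkr.json, legacy otherwise.
func useHCL2Parser(arg string) (bool, error) {
	fi, err := os.Stat(arg)
	if err != nil {
		return false, err
	}
	if fi.IsDir() {
		// A directory: every *.pkr.hcl / *.pkr.json file inside is parsed as HCL2.
		return true, nil
	}
	return strings.HasSuffix(arg, ".pkr.hcl") || strings.HasSuffix(arg, ".pkr.json"), nil
}

func main() {
	for _, arg := range os.Args[1:] {
		hcl2, err := useHCL2Parser(arg)
		fmt.Printf("%s: hcl2=%v err=%v\n", arg, hcl2, err)
	}
}
```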

## 1. The hcl2template pkg can create a packer.Build from a set of HCL (v2) files

I had to make packer.coreBuild (our one and only packer.Build implementation) a public struct with public fields.

## 2. Component interfaces get a new ConfigSpec method to decode their configuration from an HCL file

This is a breaking change for Packer plugins.

A Packer component can be a builder, provisioner, or post-processor.

Each component interface now gets a `ConfigSpec() hcldec.ObjectSpec`
method, which lets Packer know the layout of the HCL2 config meant
to configure that specific component.
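
For example, a post-processor whose only HCL2 argument is an `inline` list of strings might describe itself as below. This is a hand-written sketch, not the generated code; `MyPostProcessor` and its spec are purely illustrative:

```go
package example

import (
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/zclconf/go-cty/cty"
)

// MyPostProcessor is a stand-in component used only for this example.
type MyPostProcessor struct{}

// ConfigSpec tells Packer the shape of the HCL2 block that configures this
// component: here, a single "inline" attribute holding a list of strings.
func (p *MyPostProcessor) ConfigSpec() hcldec.ObjectSpec {
	return hcldec.ObjectSpec{
		"inline": &hcldec.AttrSpec{Name: "inline", Type: cty.List(cty.String)},
	}
}
```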

This ObjectSpec is sent over the wire (RPC), and a cty.Value is now
passed through the already existing configuration entrypoints:

```go
Provisioner.Prepare(raws ...interface{}) error
Builder.Prepare(raws ...interface{}) ([]string, error)
PostProcessor.Configure(raws ...interface{}) error
```
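
Put together, the flow looks roughly like the sketch below: core decodes an HCL block body against the component's ObjectSpec and hands the resulting cty.Value to the existing Configure entrypoint. This is an illustrative helper, not the actual core code, and it assumes the PostProcessor interface exposes the new ConfigSpec method described above:

```go
package example

import (
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/packer/packer"
)

// configureFromHCL is a hypothetical helper: it decodes the block body into a
// cty.Value using the component's own ObjectSpec, then feeds that value to the
// pre-existing Configure entrypoint (the value the RPC layer then encodes).
func configureFromHCL(body hcl.Body, pp packer.PostProcessor) error {
	val, diags := hcldec.Decode(body, pp.ConfigSpec(), nil)
	if diags.HasErrors() {
		return diags
	}
	return pp.Configure(val)
}
```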

Closes #1768


Example HCL files:

```hcl
// file amazon-ebs-kms-key/run.pkr.hcl
build {
    sources = [
        "source.amazon-ebs.first",
    ]

    provisioner "shell" {
        inline = [
            "sleep 5"
        ]
    }

    post-processor "shell-local" {
        inline = [
            "sleep 5"
        ]
    }
}

// amazon-ebs-kms-key/source.pkr.hcl

source "amazon-ebs" "first" {

    ami_name = "hcl2-test"
    region = "us-east-1"
    instance_type = "t2.micro"

    kms_key_id = "c729958f-c6ba-44cd-ab39-35ab68ce0a6c"
    encrypt_boot = true
    source_ami_filter {
        filters {
          virtualization-type = "hvm"
          name =  "amzn-ami-hvm-????.??.?.????????-x86_64-gp2"
          root-device-type = "ebs"
        }
        most_recent = true
        owners = ["amazon"]
    }
    launch_block_device_mappings {
        device_name = "/dev/xvda"
        volume_size = 20
        volume_type = "gp2"
        delete_on_termination = "true"
    }
    launch_block_device_mappings {
        device_name = "/dev/xvdf"
        volume_size = 500
        volume_type = "gp2"
        delete_on_termination = true
        encrypted = true
    }

    ami_regions = ["eu-central-1"]
    run_tags {
        Name = "packer-solr-something"
        stack-name = "DevOps Tools"
    }
    
    communicator = "ssh"
    ssh_pty = true
    ssh_username = "ec2-user"
    associate_public_ip_address = true
}
```
2019-12-17 11:25:56 +01:00


package rpc

import (
	"context"
	"log"

	"github.com/hashicorp/packer/packer"
)

// An implementation of packer.PostProcessor where the PostProcessor is actually
// executed over an RPC connection.
type postProcessor struct {
	commonClient
}

// PostProcessorServer wraps a packer.PostProcessor implementation and makes it
// exportable as part of a Golang RPC server.
type PostProcessorServer struct {
	context       context.Context
	contextCancel func()

	commonServer
	p packer.PostProcessor
}

type PostProcessorConfigureArgs struct {
	Configs []interface{}
}

type PostProcessorProcessResponse struct {
	Err           *BasicError
	Keep          bool
	ForceOverride bool
	StreamId      uint32
}

func (p *postProcessor) Configure(raw ...interface{}) error {
	raw, err := encodeCTYValues(raw)
	if err != nil {
		return err
	}
	args := &PostProcessorConfigureArgs{Configs: raw}
	return p.client.Call(p.endpoint+".Configure", args, new(interface{}))
}

func (p *postProcessor) PostProcess(ctx context.Context, ui packer.Ui, a packer.Artifact) (packer.Artifact, bool, bool, error) {
	nextId := p.mux.NextId()
	server := newServerWithMux(p.mux, nextId)
	server.RegisterArtifact(a)
	server.RegisterUi(ui)
	go server.Serve()

	done := make(chan interface{})
	defer close(done)

	// Forward context cancellation to the remote post-processor.
	go func() {
		select {
		case <-ctx.Done():
			log.Printf("Cancelling post-processor after context cancellation %v", ctx.Err())
			if err := p.client.Call(p.endpoint+".Cancel", new(interface{}), new(interface{})); err != nil {
				log.Printf("Error cancelling post-processor: %s", err)
			}
		case <-done:
		}
	}()

	var response PostProcessorProcessResponse
	if err := p.client.Call(p.endpoint+".PostProcess", nextId, &response); err != nil {
		return nil, false, false, err
	}

	if response.Err != nil {
		return nil, false, false, response.Err
	}

	// A zero stream id means the post-processor returned no artifact.
	if response.StreamId == 0 {
		return nil, false, false, nil
	}

	client, err := newClientWithMux(p.mux, response.StreamId)
	if err != nil {
		return nil, false, false, err
	}
	return client.Artifact(), response.Keep, response.ForceOverride, nil
}

func (p *PostProcessorServer) Configure(args *PostProcessorConfigureArgs, reply *interface{}) (err error) {
	config, err := decodeCTYValues(args.Configs)
	if err != nil {
		return err
	}
	err = p.p.Configure(config...)
	return err
}

func (p *PostProcessorServer) PostProcess(streamId uint32, reply *PostProcessorProcessResponse) error {
	client, err := newClientWithMux(p.mux, streamId)
	if err != nil {
		return NewBasicError(err)
	}
	defer client.Close()

	if p.context == nil {
		p.context, p.contextCancel = context.WithCancel(context.Background())
	}

	// Reuse streamId for the response: zero signals that no artifact is streamed back.
	streamId = 0
	artifactResult, keep, forceOverride, err := p.p.PostProcess(p.context, client.Ui(), client.Artifact())
	if err == nil && artifactResult != nil {
		streamId = p.mux.NextId()
		server := newServerWithMux(p.mux, streamId)
		server.RegisterArtifact(artifactResult)
		go server.Serve()
	}

	*reply = PostProcessorProcessResponse{
		Err:           NewBasicError(err),
		Keep:          keep,
		ForceOverride: forceOverride,
		StreamId:      streamId,
	}
	return nil
}

func (b *PostProcessorServer) Cancel(args *interface{}, reply *interface{}) error {
	if b.contextCancel != nil {
		b.contextCancel()
	}
	return nil
}