packer-cn/post-processor/compress/post-processor.go

//go:generate mapstructure-to-hcl2 -type Config
package compress

import (
"archive/tar"
"archive/zip"
"context"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"github.com/biogo/hts/bgzf"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/packer-plugin-sdk/common"
packersdk "github.com/hashicorp/packer/packer-plugin-sdk/packer"
"github.com/hashicorp/packer/packer-plugin-sdk/template/config"
"github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
"github.com/klauspost/pgzip"
"github.com/pierrec/lz4"
"github.com/ulikunitz/xz"
)

var (
// ErrInvalidCompressionLevel is returned when the compression level passed
// to gzip is not in the expected range. See compress/flate for details.
ErrInvalidCompressionLevel = fmt.Errorf(
"Invalid compression level. Expected an integer from -1 to 9.")
ErrWrongInputCount = fmt.Errorf(
"Can only have 1 input file when not using tar/zip")
filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)
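
// Config is the user-facing configuration for the compress
// post-processor, decoded from the template. An illustrative JSON
// template snippet (field names match the mapstructure tags below):
//
//   "post-processors": [{
//     "type": "compress",
//     "output": "build.tar.gz",
//     "compression_level": 9
//   }]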
type Config struct {
common.PackerConfig `mapstructure:",squash"`
// Fields from config file
OutputPath string `mapstructure:"output"`
Format string `mapstructure:"format"`
CompressionLevel int `mapstructure:"compression_level"`
// Derived fields
Archive string
Algorithm string
ctx interpolate.Context
}
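
// PostProcessor compresses and/or archives build artifacts according
// to the decoded Config.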
type PostProcessor struct {
config Config
}
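
// ConfigSpec returns the HCL2 object spec describing this
// post-processor's configuration; FlatMapstructure and HCL2Spec are
// produced by the mapstructure-to-hcl2 go:generate directive above.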
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
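
// Configure decodes the raw template values into p.config, applies
// defaults for the output path and compression level, and validates
// the output template, deferring interpolation of "output" until
// PostProcess runs.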
func (p *PostProcessor) Configure(raws ...interface{}) error {
err := config.Decode(&p.config, &config.DecodeOpts{
PluginType: "compress",
Interpolate: true,
InterpolateContext: &p.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{"output"},
},
}, raws...)
if err != nil {
return err
}
errs := new(packersdk.MultiError)
// If there is no explicit number of Go threads to use, then set it
if os.Getenv("GOMAXPROCS") == "" {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if p.config.OutputPath == "" {
p.config.OutputPath = "packer_{{.BuildName}}_{{.BuilderType}}"
}
if p.config.CompressionLevel > pgzip.BestCompression {
p.config.CompressionLevel = pgzip.BestCompression
}
// Technically 0 means "don't compress" but I don't know how to
// differentiate between "user entered zero" and "user entered nothing".
// Also, why bother creating a compressed file with zero compression?
if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
p.config.CompressionLevel = pgzip.DefaultCompression
}
if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil {
errs = packersdk.MultiErrorAppend(
errs, fmt.Errorf("Error parsing target template: %s", err))
}
p.config.detectFromFilename()
if len(errs.Errors) > 0 {
return errs
}
return nil
}
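
// PostProcess interpolates the output path with build metadata, then
// compresses and/or archives the artifact's files into that single
// target file, returning a new artifact that points at it.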
func (p *PostProcessor) PostProcess(
ctx context.Context,
ui packersdk.Ui,
artifact packer.Artifact,
) (packer.Artifact, bool, bool, error) {
var generatedData map[interface{}]interface{}
stateData := artifact.State("generated_data")
if stateData != nil {
// Make sure it's not a nil map so we can assign to it later.
generatedData = stateData.(map[interface{}]interface{})
}
// If stateData has a nil map, generatedData will also be nil,
// and we need to make sure it's not.
if generatedData == nil {
generatedData = make(map[interface{}]interface{})
}
// These are extra variables that will be made available for interpolation.
generatedData["BuildName"] = p.config.PackerBuildName
generatedData["BuilderType"] = p.config.PackerBuilderType
p.config.ctx.Data = generatedData
target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx)
if err != nil {
return nil, false, false, fmt.Errorf("Error interpolating output value: %s", err)
}
fmt.Println(target)
newArtifact := &Artifact{Path: target}
if err = os.MkdirAll(filepath.Dir(target), os.FileMode(0755)); err != nil {
return nil, false, false, fmt.Errorf(
"Unable to create dir for archive %s: %s", target, err)
}
outputFile, err := os.Create(target)
if err != nil {
return nil, false, false, fmt.Errorf(
"Unable to create archive %s: %s", target, err)
}
defer outputFile.Close()
// Setup output interface. If we're using compression, output is a
// compression writer. Otherwise it's just a file.
var output io.WriteCloser
errTmpl := "error creating %s writer: %s"
switch p.config.Algorithm {
case "bgzf":
ui.Say(fmt.Sprintf("Using bgzf compression with %d cores for %s",
runtime.GOMAXPROCS(-1), target))
output, err = makeBGZFWriter(outputFile, p.config.CompressionLevel)
if err != nil {
return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
}
defer output.Close()
case "lz4":
ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s",
runtime.GOMAXPROCS(-1), target))
output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
if err != nil {
return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
}
defer output.Close()
case "xz":
ui.Say(fmt.Sprintf("Using xz compression with 1 core for %s (library does not support MT)",
target))
output, err = makeXZWriter(outputFile)
if err != nil {
return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
}
defer output.Close()
case "pgzip":
ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s",
runtime.GOMAXPROCS(-1), target))
output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
if err != nil {
return nil, false, false,
fmt.Errorf(errTmpl, p.config.Algorithm, err)
}
defer output.Close()
default:
output = outputFile
}
compression := p.config.Algorithm
if compression == "" {
compression = "no compression"
}
// Build an archive, if we're supposed to do that.
switch p.config.Archive {
case "tar":
ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression))
err = createTarArchive(artifact.Files(), output)
if err != nil {
return nil, false, false, fmt.Errorf("Error creating tar: %s", err)
}
case "zip":
ui.Say(fmt.Sprintf("Zipping %s", target))
err = createZipArchive(artifact.Files(), output)
if err != nil {
return nil, false, false, fmt.Errorf("Error creating zip: %s", err)
}
default:
// Filename indicates no tarball (just compress) so we'll do an io.Copy
// into our compressor.
if len(artifact.Files()) != 1 {
return nil, false, false, fmt.Errorf(
"Can only have 1 input file when not using tar/zip. Found %d "+
"files: %v", len(artifact.Files()), artifact.Files())
}
archiveFile := artifact.Files()[0]
ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression))
source, err := os.Open(archiveFile)
if err != nil {
return nil, false, false, fmt.Errorf(
"Failed to open source file %s for reading: %s",
archiveFile, err)
}
defer source.Close()
if _, err = io.Copy(output, source); err != nil {
return nil, false, false, fmt.Errorf("Failed to compress %s: %s",
archiveFile, err)
}
}
ui.Say(fmt.Sprintf("Archive %s completed", target))
return newArtifact, false, false, nil
}
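
// detectFromFilename infers the Archive and Algorithm fields from the
// extensions of the output path (or the explicit "format" setting).
// For example, "build.tar.gz" yields tar + pgzip, "build.zip" yields a
// zip archive, and "build.lz4" compresses the single input file with
// lz4; paths with no recognized extension fall back to tar + pgzip.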
func (config *Config) detectFromFilename() {
var result [][]string
extensions := map[string]string{
"tar": "tar",
"zip": "zip",
"gz": "pgzip",
"lz4": "lz4",
"bgzf": "bgzf",
"xz": "xz",
}
if config.Format == "" {
result = filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)
} else {
result = filenamePattern.FindAllStringSubmatch(fmt.Sprintf("%s.%s", config.OutputPath, config.Format), -1)
}
// No dots. Bail out with defaults.
if len(result) == 0 {
config.Algorithm = "pgzip"
config.Archive = "tar"
return
}
// Parse the last two extension groups, if they're there
lastItem := result[len(result)-1][1]
var nextToLastItem string
if len(result) == 1 {
nextToLastItem = ""
} else {
nextToLastItem = result[len(result)-2][1]
}
// Should we make an archive? E.g. tar or zip?
if nextToLastItem == "tar" {
config.Archive = "tar"
}
if lastItem == "zip" || lastItem == "tar" {
config.Archive = lastItem
// Tar or zip is our final artifact. Bail out.
return
}
// Should we compress the artifact?
algorithm, ok := extensions[lastItem]
if ok {
config.Algorithm = algorithm
// We found our compression algorithm. Bail out.
return
}
// We didn't match a known compression format. Default to tar + pgzip
config.Algorithm = "pgzip"
config.Archive = "tar"
return
}
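
// makeBGZFWriter wraps output in a parallel BGZF (blocked gzip)
// writer; bgzf.NewWriterLevel rejects out-of-range compression levels.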
func makeBGZFWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
bgzfWriter, err := bgzf.NewWriterLevel(output, compressionLevel, runtime.GOMAXPROCS(-1))
if err != nil {
return nil, ErrInvalidCompressionLevel
}
return bgzfWriter, nil
}
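
// makeLZ4Writer wraps output in an lz4 writer, applying the configured
// compression level only when it is positive.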
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
lzwriter := lz4.NewWriter(output)
if compressionLevel > 0 {
lzwriter.Header.CompressionLevel = compressionLevel
}
return lzwriter, nil
}
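
// makeXZWriter wraps output in an xz writer; xz.NewWriter accepts no
// compression level, and the library is single-threaded (hence the
// "1 core" message in PostProcess).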
func makeXZWriter(output io.WriteCloser) (io.WriteCloser, error) {
xzwriter, err := xz.NewWriter(output)
if err != nil {
return nil, err
}
return xzwriter, nil
}
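
// makePgzipWriter wraps output in a parallel gzip writer using 500 KB
// blocks across GOMAXPROCS goroutines; pgzip.NewWriterLevel rejects
// out-of-range compression levels.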
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
if err != nil {
return nil, ErrInvalidCompressionLevel
}
gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
return gzipWriter, nil
}
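
// createTarArchive streams each input file, with a header built from
// its FileInfo, into a tar archive written to output.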
func createTarArchive(files []string, output io.WriteCloser) error {
archive := tar.NewWriter(output)
defer archive.Close()
for _, path := range files {
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err)
}
header, err := tar.FileInfoHeader(fi, path)
if err != nil {
return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
}
// workaround for archive format on go >=1.10
setHeaderFormat(header)
if err := archive.WriteHeader(header); err != nil {
return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
}
if _, err := io.Copy(archive, file); err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}
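
// createZipArchive copies each input file into a zip archive written
// to output, normalizing paths to forward slashes for the zip headers.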
func createZipArchive(files []string, output io.WriteCloser) error {
archive := zip.NewWriter(output)
defer archive.Close()
for _, path := range files {
path = filepath.ToSlash(path)
source, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer source.Close()
target, err := archive.Create(path)
if err != nil {
return fmt.Errorf("Failed to add zip header for %s: %s", path, err)
}
_, err = io.Copy(target, source)
if err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}