2014-04-26 12:22:10 -04:00
|
|
|
package compress
|
|
|
|
|
|
|
|
import (
|
2015-06-10 17:04:24 -04:00
|
|
|
"archive/tar"
|
2015-06-16 22:08:22 -04:00
|
|
|
"archive/zip"
|
2015-06-10 17:04:24 -04:00
|
|
|
"compress/gzip"
|
2014-04-26 12:22:10 -04:00
|
|
|
"fmt"
|
2014-06-12 16:45:37 -04:00
|
|
|
"io"
|
|
|
|
"os"
|
2015-06-16 22:08:22 -04:00
|
|
|
"path/filepath"
|
2015-06-18 03:47:33 -04:00
|
|
|
"regexp"
|
2015-06-16 22:08:22 -04:00
|
|
|
"runtime"
|
2014-09-08 13:28:16 -04:00
|
|
|
|
2015-06-16 22:08:22 -04:00
|
|
|
"github.com/klauspost/pgzip"
|
2014-09-08 13:28:16 -04:00
|
|
|
"github.com/mitchellh/packer/common"
|
2015-06-10 16:33:50 -04:00
|
|
|
"github.com/mitchellh/packer/helper/config"
|
2014-09-08 13:28:16 -04:00
|
|
|
"github.com/mitchellh/packer/packer"
|
2015-06-10 16:33:50 -04:00
|
|
|
"github.com/mitchellh/packer/template/interpolate"
|
2015-06-16 22:08:22 -04:00
|
|
|
"github.com/pierrec/lz4"
|
2014-04-26 12:22:10 -04:00
|
|
|
)
|
|
|
|
|
2015-06-16 22:08:22 -04:00
|
|
|
// Config holds the user-supplied and derived settings for the compress
// post-processor.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	// Fields from config file
	OutputPath        string `mapstructure:"output"`              // target archive path; may contain interpolation templates
	CompressionLevel  int    `mapstructure:"compression_level"`   // gzip-style level; clamped and defaulted in Configure
	KeepInputArtifact bool   `mapstructure:"keep_input_artifact"` // when true, the input artifact is preserved

	// Derived fields, filled in by detectFromFilename from OutputPath's
	// extensions:
	Archive   string // "tar", "zip", or "" (no archiving)
	Algorithm string // "pgzip", "lz4", or "" (no compression)

	// ctx is the interpolation context used when rendering OutputPath.
	ctx *interpolate.Context
}
|
|
|
|
|
2015-06-10 16:46:21 -04:00
|
|
|
// PostProcessor compresses and/or archives build artifacts according to
// its Config. It implements the packer.PostProcessor interface.
type PostProcessor struct {
	config *Config // populated by Configure
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
var (
	// ErrInvalidCompressionLevel is returned when the compression level passed
	// to gzip is not in the expected range. See compress/flate for details.
	ErrInvalidCompressionLevel = fmt.Errorf(
		"Invalid compression level. Expected an integer from -1 to 9.")

	// ErrWrongInputCount signals that multiple input files were supplied
	// without an archive format (tar/zip) to bundle them.
	// NOTE(review): PostProcess currently builds this message inline rather
	// than returning this sentinel — confirm whether anything relies on it.
	ErrWrongInputCount = fmt.Errorf(
		"Can only have 1 input file when not using tar/zip")

	// filenamePattern captures each dot-separated extension from the output
	// path, e.g. "tar" and "gz" from "foo.tar.gz".
	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)
|
|
|
|
|
2015-06-10 16:46:21 -04:00
|
|
|
func (p *PostProcessor) Configure(raws ...interface{}) error {
|
2015-06-12 20:25:09 -04:00
|
|
|
err := config.Decode(&p.config, &config.DecodeOpts{
|
2015-06-10 16:33:50 -04:00
|
|
|
Interpolate: true,
|
|
|
|
InterpolateFilter: &interpolate.RenderFilter{
|
2015-06-12 20:25:09 -04:00
|
|
|
Exclude: []string{},
|
2015-06-10 16:33:50 -04:00
|
|
|
},
|
|
|
|
}, raws...)
|
2014-04-26 12:22:10 -04:00
|
|
|
|
2015-06-10 15:30:18 -04:00
|
|
|
errs := new(packer.MultiError)
|
|
|
|
|
2015-06-18 17:22:26 -04:00
|
|
|
// If there is no explicit number of Go threads to use, then set it
|
|
|
|
if os.Getenv("GOMAXPROCS") == "" {
|
|
|
|
runtime.GOMAXPROCS(runtime.NumCPU())
|
|
|
|
}
|
|
|
|
|
2015-06-12 20:25:09 -04:00
|
|
|
if p.config.OutputPath == "" {
|
|
|
|
p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}"
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-12 20:25:09 -04:00
|
|
|
if err = interpolate.Validate(p.config.OutputPath, p.config.ctx); err != nil {
|
2015-06-10 15:30:18 -04:00
|
|
|
errs = packer.MultiErrorAppend(
|
|
|
|
errs, fmt.Errorf("Error parsing target template: %s", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
templates := map[string]*string{
|
2015-06-12 20:25:09 -04:00
|
|
|
"output": &p.config.OutputPath,
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
if p.config.CompressionLevel > pgzip.BestCompression {
|
|
|
|
p.config.CompressionLevel = pgzip.BestCompression
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
2015-06-18 06:55:51 -04:00
|
|
|
// Technically 0 means "don't compress" but I don't know how to
|
|
|
|
// differentiate between "user entered zero" and "user entered nothing".
|
|
|
|
// Also, why bother creating a compressed file with zero compression?
|
|
|
|
if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
|
|
|
|
p.config.CompressionLevel = pgzip.DefaultCompression
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
for key, ptr := range templates {
|
|
|
|
if *ptr == "" {
|
|
|
|
errs = packer.MultiErrorAppend(
|
|
|
|
errs, fmt.Errorf("%s must be set", key))
|
|
|
|
}
|
|
|
|
|
2015-06-12 20:25:09 -04:00
|
|
|
*ptr, err = interpolate.Render(p.config.OutputPath, p.config.ctx)
|
2015-06-10 15:30:18 -04:00
|
|
|
if err != nil {
|
|
|
|
errs = packer.MultiErrorAppend(
|
|
|
|
errs, fmt.Errorf("Error processing %s: %s", key, err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
p.config.detectFromFilename()
|
|
|
|
|
2015-06-10 15:30:18 -04:00
|
|
|
if len(errs.Errors) > 0 {
|
|
|
|
return errs
|
|
|
|
}
|
|
|
|
|
2014-04-26 12:22:10 -04:00
|
|
|
return nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-06-18 03:47:33 -04:00
|
|
|
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
target := p.config.OutputPath
|
2015-06-18 07:41:05 -04:00
|
|
|
keep := p.config.KeepInputArtifact
|
2015-06-18 06:55:51 -04:00
|
|
|
newArtifact := &Artifact{Path: target}
|
2014-04-26 12:22:10 -04:00
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
outputFile, err := os.Create(target)
|
2015-06-18 03:47:33 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, false, fmt.Errorf(
|
2015-06-18 06:55:51 -04:00
|
|
|
"Unable to create archive %s: %s", target, err)
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
defer outputFile.Close()
|
|
|
|
|
|
|
|
// Setup output interface. If we're using compression, output is a
|
|
|
|
// compression writer. Otherwise it's just a file.
|
|
|
|
var output io.WriteCloser
|
|
|
|
switch p.config.Algorithm {
|
|
|
|
case "lz4":
|
2015-06-18 17:22:26 -04:00
|
|
|
ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s",
|
|
|
|
runtime.GOMAXPROCS(-1), target))
|
2015-06-18 07:41:05 -04:00
|
|
|
output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
|
|
|
|
defer output.Close()
|
2015-06-18 03:47:33 -04:00
|
|
|
case "pgzip":
|
2015-06-18 17:22:26 -04:00
|
|
|
ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s",
|
|
|
|
runtime.GOMAXPROCS(-1), target))
|
2015-06-18 07:41:05 -04:00
|
|
|
output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
|
2015-06-18 03:47:33 -04:00
|
|
|
defer output.Close()
|
|
|
|
default:
|
|
|
|
output = outputFile
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
compression := p.config.Algorithm
|
|
|
|
if compression == "" {
|
2015-06-18 17:22:26 -04:00
|
|
|
compression = "no compression"
|
2015-06-18 06:55:51 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Build an archive, if we're supposed to do that.
|
2015-06-18 03:47:33 -04:00
|
|
|
switch p.config.Archive {
|
|
|
|
case "tar":
|
2015-06-18 17:22:26 -04:00
|
|
|
ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression))
|
2015-06-18 07:41:05 -04:00
|
|
|
err = createTarArchive(artifact.Files(), output)
|
|
|
|
if err != nil {
|
|
|
|
return nil, keep, fmt.Errorf("Error creating tar: %s", err)
|
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
case "zip":
|
2015-06-18 06:55:51 -04:00
|
|
|
ui.Say(fmt.Sprintf("Zipping %s", target))
|
2015-06-18 07:41:05 -04:00
|
|
|
err = createZipArchive(artifact.Files(), output)
|
|
|
|
if err != nil {
|
|
|
|
return nil, keep, fmt.Errorf("Error creating zip: %s", err)
|
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
default:
|
2015-06-18 06:55:51 -04:00
|
|
|
// Filename indicates no tarball (just compress) so we'll do an io.Copy
|
|
|
|
// into our compressor.
|
2015-06-18 03:47:33 -04:00
|
|
|
if len(artifact.Files()) != 1 {
|
2015-06-18 07:41:05 -04:00
|
|
|
return nil, keep, fmt.Errorf(
|
2015-06-18 03:47:33 -04:00
|
|
|
"Can only have 1 input file when not using tar/zip. Found %d "+
|
|
|
|
"files: %v", len(artifact.Files()), artifact.Files())
|
|
|
|
}
|
2015-06-18 17:22:26 -04:00
|
|
|
archiveFile := artifact.Files()[0]
|
|
|
|
ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression))
|
2015-06-18 07:41:05 -04:00
|
|
|
|
2015-06-18 17:22:26 -04:00
|
|
|
source, err := os.Open(archiveFile)
|
2015-06-18 03:47:33 -04:00
|
|
|
if err != nil {
|
2015-06-18 07:41:05 -04:00
|
|
|
return nil, keep, fmt.Errorf(
|
2015-06-18 03:47:33 -04:00
|
|
|
"Failed to open source file %s for reading: %s",
|
2015-06-18 17:22:26 -04:00
|
|
|
archiveFile, err)
|
2015-06-18 03:47:33 -04:00
|
|
|
}
|
|
|
|
defer source.Close()
|
2015-06-18 07:41:05 -04:00
|
|
|
|
|
|
|
if _, err = io.Copy(output, source); err != nil {
|
|
|
|
return nil, keep, fmt.Errorf("Failed to compress %s: %s",
|
2015-06-18 17:22:26 -04:00
|
|
|
archiveFile, err)
|
2015-06-18 07:41:05 -04:00
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
ui.Say(fmt.Sprintf("Archive %s completed", target))
|
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
return newArtifact, keep, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (config *Config) detectFromFilename() {
|
|
|
|
|
|
|
|
extensions := map[string]string{
|
|
|
|
"tar": "tar",
|
|
|
|
"zip": "zip",
|
|
|
|
"gz": "pgzip",
|
|
|
|
"lz4": "lz4",
|
|
|
|
}
|
|
|
|
|
|
|
|
result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)
|
|
|
|
|
|
|
|
// No dots. Bail out with defaults.
|
|
|
|
if len(result) == 0 {
|
|
|
|
config.Algorithm = "pgzip"
|
|
|
|
config.Archive = "tar"
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse the last two .groups, if they're there
|
|
|
|
lastItem := result[len(result)-1][1]
|
|
|
|
var nextToLastItem string
|
|
|
|
if len(result) == 1 {
|
|
|
|
nextToLastItem = ""
|
|
|
|
} else {
|
|
|
|
nextToLastItem = result[len(result)-2][1]
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should we make an archive? E.g. tar or zip?
|
|
|
|
if nextToLastItem == "tar" {
|
|
|
|
config.Archive = "tar"
|
|
|
|
}
|
|
|
|
if lastItem == "zip" || lastItem == "tar" {
|
|
|
|
config.Archive = lastItem
|
|
|
|
// Tar or zip is our final artifact. Bail out.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should we compress the artifact?
|
|
|
|
algorithm, ok := extensions[lastItem]
|
|
|
|
if ok {
|
|
|
|
config.Algorithm = algorithm
|
|
|
|
// We found our compression algorithm. Bail out.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// We didn't match a known compression format. Default to tar + pgzip
|
|
|
|
config.Algorithm = "pgzip"
|
|
|
|
config.Archive = "tar"
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
|
|
|
|
lzwriter := lz4.NewWriter(output)
|
|
|
|
if compressionLevel > gzip.DefaultCompression {
|
|
|
|
lzwriter.Header.HighCompression = true
|
|
|
|
}
|
|
|
|
return lzwriter, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
|
|
|
|
gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
|
|
|
|
if err != nil {
|
|
|
|
return nil, ErrInvalidCompressionLevel
|
|
|
|
}
|
|
|
|
gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
|
|
|
|
return gzipWriter, nil
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
func createTarArchive(files []string, output io.WriteCloser) error {
|
2015-06-18 03:47:33 -04:00
|
|
|
archive := tar.NewWriter(output)
|
|
|
|
defer archive.Close()
|
2015-06-10 15:30:18 -04:00
|
|
|
|
2015-06-18 03:47:33 -04:00
|
|
|
for _, path := range files {
|
|
|
|
file, err := os.Open(path)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Unable to read file %s: %s", path, err)
|
|
|
|
}
|
|
|
|
defer file.Close()
|
2015-06-10 15:30:18 -04:00
|
|
|
|
2015-06-18 03:47:33 -04:00
|
|
|
fi, err := file.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err)
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
2015-06-18 03:47:33 -04:00
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
header, err := tar.FileInfoHeader(fi, path)
|
2015-06-10 15:30:18 -04:00
|
|
|
if err != nil {
|
2015-06-18 03:47:33 -04:00
|
|
|
return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 03:47:33 -04:00
|
|
|
if err := archive.WriteHeader(header); err != nil {
|
|
|
|
return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
|
|
|
|
}
|
2015-06-10 15:30:18 -04:00
|
|
|
|
2015-06-18 03:47:33 -04:00
|
|
|
if _, err := io.Copy(archive, file); err != nil {
|
|
|
|
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 06:55:51 -04:00
|
|
|
func createZipArchive(files []string, output io.WriteCloser) error {
|
2015-06-18 07:41:05 -04:00
|
|
|
archive := zip.NewWriter(output)
|
|
|
|
defer archive.Close()
|
2015-06-18 06:55:51 -04:00
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
for _, path := range files {
|
|
|
|
path = filepath.ToSlash(path)
|
2015-06-10 15:30:18 -04:00
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
source, err := os.Open(path)
|
2015-06-10 15:30:18 -04:00
|
|
|
if err != nil {
|
2015-06-18 07:41:05 -04:00
|
|
|
return fmt.Errorf("Unable to read file %s: %s", path, err)
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
2015-06-18 07:41:05 -04:00
|
|
|
defer source.Close()
|
2015-06-10 15:30:18 -04:00
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
target, err := archive.Create(path)
|
2015-06-10 15:30:18 -04:00
|
|
|
if err != nil {
|
2015-06-18 07:41:05 -04:00
|
|
|
return fmt.Errorf("Failed to add zip header for %s: %s", path, err)
|
2015-06-10 15:30:18 -04:00
|
|
|
}
|
|
|
|
|
2015-06-18 07:41:05 -04:00
|
|
|
_, err = io.Copy(target, source)
|
2015-06-10 15:30:18 -04:00
|
|
|
if err != nil {
|
2015-06-18 07:41:05 -04:00
|
|
|
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
|
2014-04-26 12:22:10 -04:00
|
|
|
}
|
|
|
|
}
|
2015-06-18 07:41:05 -04:00
|
|
|
return nil
|
2014-04-26 12:22:10 -04:00
|
|
|
}
|