Change to compression_level, fix and add tests for format detection
parent 8fdb4f77e0
commit b767aa7f99
@@ -22,27 +22,31 @@ import (
 type Config struct {
 	common.PackerConfig `mapstructure:",squash"`
 	OutputPath          string `mapstructure:"output"`
-	Level               int    `mapstructure:"level"`
+	CompressionLevel    int    `mapstructure:"compression_level"`
 	KeepInputArtifact   bool   `mapstructure:"keep_input_artifact"`
 	Archive             string
 	Algorithm           string
+	UsingDefault        bool
 	ctx                 *interpolate.Context
 }
 
 type PostProcessor struct {
-	config Config
+	config *Config
 }
 
-// ErrInvalidCompressionLevel is returned when the compression level passed to
-// gzip is not in the expected range. See compress/flate for details.
-var ErrInvalidCompressionLevel = fmt.Errorf(
+var (
+	// ErrInvalidCompressionLevel is returned when the compression level passed
+	// to gzip is not in the expected range. See compress/flate for details.
+	ErrInvalidCompressionLevel = fmt.Errorf(
 		"Invalid compression level. Expected an integer from -1 to 9.")
 
-var ErrWrongInputCount = fmt.Errorf(
+	ErrWrongInputCount = fmt.Errorf(
 		"Can only have 1 input file when not using tar/zip")
 
-func detectFromFilename(config *Config) error {
-	re := regexp.MustCompile("^.+?(?:\\.([a-z0-9]+))?\\.([a-z0-9]+)$")
+	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
+)
 
+func (config *Config) detectFromFilename() {
+
 	extensions := map[string]string{
 		"tar": "tar",
@@ -51,34 +55,47 @@ func detectFromFilename(config *Config) error {
 		"lz4": "lz4",
 	}
 
-	result := re.FindAllString(config.OutputPath, -1)
+	result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)
 
+	if len(result) == 0 {
+		config.Algorithm = "pgzip"
+		config.Archive = "tar"
+		return
+	}
+
 	// Should we make an archive? E.g. tar or zip?
-	if result[0] == "tar" {
+	var nextToLastItem string
+	if len(result) == 1 {
+		nextToLastItem = ""
+	} else {
+		nextToLastItem = result[len(result)-2][1]
+	}
+
+	lastItem := result[len(result)-1][1]
+	if nextToLastItem == "tar" {
 		config.Archive = "tar"
 	}
-	if result[1] == "zip" || result[1] == "tar" {
-		config.Archive = result[1]
+	if lastItem == "zip" || lastItem == "tar" {
+		config.Archive = lastItem
 		// Tar or zip is our final artifact. Bail out.
-		return nil
+		return
 	}
 
 	// Should we compress the artifact?
-	algorithm, ok := extensions[result[1]]
+	algorithm, ok := extensions[lastItem]
 	if ok {
 		config.Algorithm = algorithm
-		// We found our compression algorithm something. Bail out.
-		return nil
+		// We found our compression algorithm. Bail out.
+		return
 	}
 
 	// We didn't find anything. Default to tar + pgzip
 	config.Algorithm = "pgzip"
 	config.Archive = "tar"
-	return fmt.Errorf("Unable to detect compression algorithm")
+	return
 }
 
 func (p *PostProcessor) Configure(raws ...interface{}) error {
-	p.config.Level = -1
 	err := config.Decode(&p.config, &config.DecodeOpts{
 		Interpolate: true,
 		InterpolateFilter: &interpolate.RenderFilter{
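For reference, a rough standalone sketch (not part of the commit) of how the new `filenamePattern` matching drives detection. The sample paths mirror the cases in `TestDetectFilename` further down; the wrapper program itself is hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as in the commit: captures each ".ext" group in the filename.
var filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)

func main() {
	for _, path := range []string{"test", "test.zip", "test.lz4", "test.blah.bloo.blee.tar.lz4"} {
		result := filenamePattern.FindAllStringSubmatch(path, -1)
		if len(result) == 0 {
			fmt.Printf("%-28s -> no extensions, default to tar + pgzip\n", path)
			continue
		}
		// Only the last one or two captured extensions matter for detection.
		last := result[len(result)-1][1]
		nextToLast := ""
		if len(result) > 1 {
			nextToLast = result[len(result)-2][1]
		}
		fmt.Printf("%-28s -> last=%q nextToLast=%q\n", path, last, nextToLast)
	}
}
```

Because only the trailing extensions are inspected, a path like `test.blah.bloo.blee.tar.lz4` still resolves to a tar archive with lz4 compression.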
@@ -86,6 +103,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
 		},
 	}, raws...)
 
+	fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel)
+
 	errs := new(packer.MultiError)
 
 	if p.config.OutputPath == "" {
@@ -101,13 +120,17 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
 		"output": &p.config.OutputPath,
 	}
 
-	if p.config.Level > gzip.BestCompression {
-		p.config.Level = gzip.BestCompression
+	if p.config.CompressionLevel > pgzip.BestCompression {
+		p.config.CompressionLevel = pgzip.BestCompression
 	}
-	if p.config.Level == -1 {
-		p.config.Level = gzip.DefaultCompression
+	// Technically 0 means "don't compress" but I don't know how to
+	// differentiate between "user entered zero" and "user entered nothing".
+	// Also, why bother creating a compressed file with zero compression?
+	if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
+		p.config.CompressionLevel = pgzip.DefaultCompression
 	}
 
+	fmt.Printf("CompressionLevel: %d\n", p.config.CompressionLevel)
 	for key, ptr := range templates {
 		if *ptr == "" {
 			errs = packer.MultiErrorAppend(
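A minimal sketch (again, not part of the commit) of the level handling added in `Configure` above. It assumes the pgzip package mirrors the compress/flate constants (`BestCompression` is 9, `DefaultCompression` is -1) and imports as `github.com/klauspost/pgzip`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/pgzip" // assumed import path for the pgzip package used here
)

// normalizeLevel mirrors the Configure logic: clamp anything above 9 down to
// BestCompression, and treat "unset" (-1) or 0 as the library default.
func normalizeLevel(level int) int {
	if level > pgzip.BestCompression {
		level = pgzip.BestCompression
	}
	if level == -1 || level == 0 {
		level = pgzip.DefaultCompression
	}
	return level
}

func main() {
	for _, in := range []int{-1, 0, 1, 6, 9, 42} { // hypothetical user inputs
		fmt.Printf("compression_level %2d -> pgzip level %d\n", in, normalizeLevel(in))
	}
}
```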
@@ -121,6 +144,8 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
 		}
 	}
 
+	p.config.detectFromFilename()
+
 	if len(errs.Errors) > 0 {
 		return errs
 	}
@@ -131,12 +156,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
 
 func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
 
-	newArtifact := &Artifact{Path: p.config.OutputPath}
+	target := p.config.OutputPath
+	newArtifact := &Artifact{Path: target}
 
-	outputFile, err := os.Create(p.config.OutputPath)
+	outputFile, err := os.Create(target)
 	if err != nil {
 		return nil, false, fmt.Errorf(
-			"Unable to create archive %s: %s", p.config.OutputPath, err)
+			"Unable to create archive %s: %s", target, err)
 	}
 	defer outputFile.Close()
 
@@ -145,31 +171,44 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 	var output io.WriteCloser
 	switch p.config.Algorithm {
 	case "lz4":
+		ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target))
 		lzwriter := lz4.NewWriter(outputFile)
-		if p.config.Level > gzip.DefaultCompression {
+		if p.config.CompressionLevel > gzip.DefaultCompression {
 			lzwriter.Header.HighCompression = true
 		}
 		defer lzwriter.Close()
 		output = lzwriter
 	case "pgzip":
-		output, err = pgzip.NewWriterLevel(outputFile, p.config.Level)
+		ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target))
+		gzipWriter, err := pgzip.NewWriterLevel(outputFile, p.config.CompressionLevel)
 		if err != nil {
 			return nil, false, ErrInvalidCompressionLevel
 		}
+		gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
+		output = gzipWriter
 		defer output.Close()
 	default:
 		output = outputFile
 	}
 
-	//Archive
+	compression := p.config.Algorithm
+	if compression == "" {
+		compression = "no"
+	}
+
+	// Build an archive, if we're supposed to do that.
 	switch p.config.Archive {
 	case "tar":
-		archiveTar(artifact.Files(), output)
+		ui.Say(fmt.Sprintf("Taring %s with %s compression", target, compression))
+		createTarArchive(artifact.Files(), output)
 	case "zip":
+		ui.Say(fmt.Sprintf("Zipping %s", target))
 		archive := zip.NewWriter(output)
 		defer archive.Close()
 	default:
-		// We have a regular file, so we'll just do an io.Copy
+		ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression))
+		// Filename indicates no tarball (just compress) so we'll do an io.Copy
+		// into our compressor.
 		if len(artifact.Files()) != 1 {
 			return nil, false, fmt.Errorf(
 				"Can only have 1 input file when not using tar/zip. Found %d "+
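For context, a standalone sketch of the pgzip branch above: a parallel gzip writer tuned with 500000-byte blocks and one block per available CPU. The output filename and the `github.com/klauspost/pgzip` import path are assumptions, not taken from this diff:

```go
package main

import (
	"log"
	"os"
	"runtime"

	"github.com/klauspost/pgzip" // assumed import path
)

func main() {
	f, err := os.Create("example.gz") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	w, err := pgzip.NewWriterLevel(f, pgzip.DefaultCompression)
	if err != nil {
		log.Fatal(err) // returned when the level is out of range
	}
	defer w.Close()

	// Same tuning as the diff: 500000-byte blocks, GOMAXPROCS concurrent blocks.
	if err := w.SetConcurrency(500000, runtime.GOMAXPROCS(-1)); err != nil {
		log.Fatal(err)
	}

	if _, err := w.Write([]byte("hello pgzip\n")); err != nil {
		log.Fatal(err)
	}
}
```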
@@ -185,10 +224,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac
 		io.Copy(output, source)
 	}
 
+	ui.Say(fmt.Sprintf("Archive %s completed", target))
+
 	return newArtifact, p.config.KeepInputArtifact, nil
 }
 
-func archiveTar(files []string, output io.WriteCloser) error {
+func createTarArchive(files []string, output io.WriteCloser) error {
 	archive := tar.NewWriter(output)
 	defer archive.Close()
 
@@ -225,44 +266,8 @@ func archiveTar(files []string, output io.WriteCloser) error {
 	return nil
 }
 
-func (p *PostProcessor) cmpTAR(files []string, target string) ([]string, error) {
-	fw, err := os.Create(target)
-	if err != nil {
-		return nil, fmt.Errorf("tar error creating tar %s: %s", target, err)
-	}
-	defer fw.Close()
-
-	tw := tar.NewWriter(fw)
-	defer tw.Close()
-
-	for _, name := range files {
-		fi, err := os.Stat(name)
-		if err != nil {
-			return nil, fmt.Errorf("tar error on stat of %s: %s", name, err)
-		}
-
-		target, _ := os.Readlink(name)
-		header, err := tar.FileInfoHeader(fi, target)
-		if err != nil {
-			return nil, fmt.Errorf("tar error reading info for %s: %s", name, err)
-		}
-
-		if err = tw.WriteHeader(header); err != nil {
-			return nil, fmt.Errorf("tar error writing header for %s: %s", name, err)
-		}
-
-		fr, err := os.Open(name)
-		if err != nil {
-			return nil, fmt.Errorf("tar error opening file %s: %s", name, err)
-		}
-
-		if _, err = io.Copy(tw, fr); err != nil {
-			fr.Close()
-			return nil, fmt.Errorf("tar error copying contents of %s: %s", name, err)
-		}
-		fr.Close()
-	}
-	return []string{target}, nil
+func createZipArchive(files []string, output io.WriteCloser) error {
+	return fmt.Errorf("Not implemented")
 }
 
 func (p *PostProcessor) cmpGZIP(files []string, target string) ([]string, error) {
@@ -273,7 +278,7 @@ func (p *PostProcessor) cmpGZIP(files []string, target string) ([]string, error)
 	if err != nil {
 		return nil, fmt.Errorf("gzip error creating archive: %s", err)
 	}
-	cw, err := gzip.NewWriterLevel(fw, p.config.Level)
+	cw, err := gzip.NewWriterLevel(fw, p.config.CompressionLevel)
 	if err != nil {
 		fw.Close()
 		return nil, fmt.Errorf("gzip error: %s", err)
@@ -306,8 +311,8 @@ func (p *PostProcessor) cmpPGZIP(files []string, target string) ([]string, error
 	if err != nil {
 		return nil, fmt.Errorf("pgzip error: %s", err)
 	}
-	cw, err := pgzip.NewWriterLevel(fw, p.config.Level)
-	cw.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
+	cw, err := pgzip.NewWriterLevel(fw, p.config.CompressionLevel)
 	if err != nil {
 		fw.Close()
 		return nil, fmt.Errorf("pgzip error: %s", err)
|
||||||
fw.Close()
|
fw.Close()
|
||||||
return nil, fmt.Errorf("lz4 error: %s", err)
|
return nil, fmt.Errorf("lz4 error: %s", err)
|
||||||
}
|
}
|
||||||
if p.config.Level > gzip.DefaultCompression {
|
if p.config.CompressionLevel > gzip.DefaultCompression {
|
||||||
cw.Header.HighCompression = true
|
cw.Header.HighCompression = true
|
||||||
}
|
}
|
||||||
fr, err := os.Open(name)
|
fr, err := os.Open(name)
|
||||||
|
|
|
@@ -45,6 +45,48 @@ func setup(t *testing.T) (packer.Ui, packer.Artifact, error) {
 	return ui, artifact, err
 }
 
+func TestDetectFilename(t *testing.T) {
+	// Test default / fallback with no file extension
+	nakedFilename := Config{OutputPath: "test"}
+	nakedFilename.detectFromFilename()
+	if nakedFilename.Archive != "tar" {
+		t.Error("Expected to find tar archive setting")
+	}
+	if nakedFilename.Algorithm != "pgzip" {
+		t.Error("Expected to find pgzip algorithm setting")
+	}
+
+	// Test .archive
+	zipFilename := Config{OutputPath: "test.zip"}
+	zipFilename.detectFromFilename()
+	if zipFilename.Archive != "zip" {
+		t.Error("Expected to find zip archive setting")
+	}
+	if zipFilename.Algorithm != "" {
+		t.Error("Expected to find empty algorithm setting")
+	}
+
+	// Test .compress
+	lz4Filename := Config{OutputPath: "test.lz4"}
+	lz4Filename.detectFromFilename()
+	if lz4Filename.Archive != "" {
+		t.Error("Expected to find empty archive setting")
+	}
+	if lz4Filename.Algorithm != "lz4" {
+		t.Error("Expected to find lz4 algorithm setting")
+	}
+
+	// Test .archive.compress with some.extra.dots...
+	lotsOfDots := Config{OutputPath: "test.blah.bloo.blee.tar.lz4"}
+	lotsOfDots.detectFromFilename()
+	if lotsOfDots.Archive != "tar" {
+		t.Error("Expected to find tar archive setting")
+	}
+	if lotsOfDots.Algorithm != "lz4" {
+		t.Error("Expected to find lz4 algorithm setting")
+	}
+}
+
 func TestSimpleCompress(t *testing.T) {
 	if os.Getenv(env.TestEnvVar) == "" {
 		t.Skip(fmt.Sprintf(
@@ -167,13 +209,18 @@ func TestCompressOptions(t *testing.T) {
 		defer artifact.Destroy()
 	}
 
-	tpl, err := template.Parse(strings.NewReader(zipTestCase))
+	tpl, err := template.Parse(strings.NewReader(optionsTestCase))
 	if err != nil {
 		t.Fatalf("Unable to parse test config: %s", err)
 	}
 
 	compressor := PostProcessor{}
 	compressor.Configure(tpl.PostProcessors[0][0].Config)
+
+	if compressor.config.CompressionLevel != 9 {
+		t.Errorf("Expected compression_level 9, got %d", compressor.config.CompressionLevel)
+	}
+
 	artifactOut, _, err := compressor.PostProcess(ui, artifact)
 	if err != nil {
 		t.Fatalf("Failed to archive artifact: %s", err)
@@ -227,8 +274,7 @@ const optionsTestCase = `
 {
     "type": "compress",
     "output": "package.gz",
-    "level": 9,
-    "parallel": false
+    "compression_level": 9
 }
 ]
 }
@@ -10,25 +10,24 @@ description: |-
 Type: `compress`
 
 The Packer compress post-processor takes an artifact with files (such as from
-VMware or VirtualBox) and gzip compresses the artifact into a single
-archive.
+VMware or VirtualBox) and compresses the artifact into a single archive.
 
 ## Configuration
 
-The minimal required configuration is to specify the output file. This will create a gzipped tarball.
+You must specify the output filename. The archive format is derived from the filename.
 
-* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file.
+* `output` (required, string) - The path to save the compressed archive. The archive format is inferred from the filename. E.g. `.tar.gz` will be a gzipped tarball. `.zip` will be a zip file. If the extension can't be detected packer defaults to `.tar.gz` behavior but will not change the filename.
 
-If the extension can't be detected tar+gzip will be used as a fallback.
+If you are executing multiple builders in parallel you should make sure `output` is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`.
 
 If you want more control over how the archive is created you can specify the following settings:
 
-* `level` (optional, integer) - Specify the compression level, for algorithms that support it. Value from -1 through 9 inclusive. 9 offers the smallest file size, but takes longer
+* `compression_level` (optional, integer) - Specify the compression level, for algorithms that support it, from 1 through 9 inclusive. Typically higher compression levels take longer but produce smaller files. Default if omitted is 6
 * `keep_input_artifact` (optional, bool) - Keep source files; defaults to false
 
 ## Supported Formats
 
-Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`.
+Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and `.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to compress.
 
 ## Example
 
@@ -37,7 +36,7 @@ Some minimal examples are shown below, showing only the post-processor configura
 ```json
 {
   "type": "compress",
-  "output": "archive.tar.gz"
+  "output": "archive.tar.lz4"
 }
 ```
 
@@ -48,13 +47,10 @@ Some minimal examples are shown below, showing only the post-processor configura
 }
 ```
 
-A more complex example, again showing only the post-processor configuration:
-
 ```json
 {
   "type": "compress",
   "output": "archive.gz",
-  "compression": 9,
-  "parallel": false
+  "compression": 9
 }
 ```