Complete Atlas deprecation.
Removes the push command and the Atlas post-processor. Please see our guide on building immutable infrastructure with Packer on CI/CD for ideas on implementing these features yourself: https://www.packer.io/guides/packer-on-cicd/
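The core of what `push` did can be reproduced with the Go standard library alone: tar-and-gzip the template directory, then hand the archive to whatever storage your CI/CD pipeline uses. The sketch below is illustrative only — the upload endpoint is a placeholder, not anything Packer provides — but it mirrors the archive-then-upload flow of the removed command.

    // replicatepush.go: a rough sketch of the archive-and-upload step that
    // `packer push` performed, for use from your own CI/CD job.
    // The endpoint URL below is hypothetical; substitute your own store.
    package main

    import (
        "archive/tar"
        "bytes"
        "compress/gzip"
        "io"
        "log"
        "net/http"
        "os"
        "path/filepath"
    )

    func main() {
        dir := os.Args[1] // directory holding template.json and supporting files

        var buf bytes.Buffer
        gz := gzip.NewWriter(&buf)
        tw := tar.NewWriter(gz)

        // Walk the directory and add every regular file to the tarball,
        // keyed by its path relative to the template directory.
        err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
            if err != nil || info.IsDir() {
                return err
            }
            rel, err := filepath.Rel(dir, path)
            if err != nil {
                return err
            }
            hdr, err := tar.FileInfoHeader(info, "")
            if err != nil {
                return err
            }
            hdr.Name = rel
            if err := tw.WriteHeader(hdr); err != nil {
                return err
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(tw, f)
            return err
        })
        if err != nil {
            log.Fatal(err)
        }
        tw.Close()
        gz.Close()

        // Hypothetical artifact-store endpoint; point this at your own.
        resp, err := http.Post("https://artifacts.example.com/packer/foo-bar.tar.gz",
            "application/gzip", &buf)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Printf("uploaded archive: %s", resp.Status)
    }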
parent 0633189c98
commit ddf23a2c46
@@ -5,22 +5,6 @@ import (
    "testing"
)

func TestFix_noArgs(t *testing.T) {
    c := &PushCommand{Meta: testMeta(t)}
    code := c.Run(nil)
    if code != 1 {
        t.Fatalf("bad: %#v", code)
    }
}

func TestFix_multiArgs(t *testing.T) {
    c := &PushCommand{Meta: testMeta(t)}
    code := c.Run([]string{"one", "two"})
    if code != 1 {
        t.Fatalf("bad: %#v", code)
    }
}

func TestFix(t *testing.T) {
    c := &FixCommand{
        Meta: testMeta(t),
@@ -48,7 +48,6 @@ import (
    alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import"
    amazonimportpostprocessor "github.com/hashicorp/packer/post-processor/amazon-import"
    artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice"
    atlaspostprocessor "github.com/hashicorp/packer/post-processor/atlas"
    checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum"
    compresspostprocessor "github.com/hashicorp/packer/post-processor/compress"
    dockerimportpostprocessor "github.com/hashicorp/packer/post-processor/docker-import"

@@ -139,7 +138,6 @@ var PostProcessors = map[string]packer.PostProcessor{
    "alicloud-import": new(alicloudimportpostprocessor.PostProcessor),
    "amazon-import":   new(amazonimportpostprocessor.PostProcessor),
    "artifice":        new(artificepostprocessor.PostProcessor),
    "atlas":           new(atlaspostprocessor.PostProcessor),
    "checksum":        new(checksumpostprocessor.PostProcessor),
    "compress":        new(compresspostprocessor.PostProcessor),
    "docker-import":   new(dockerimportpostprocessor.PostProcessor),
437  command/push.go
@@ -1,437 +0,0 @@
package command

import (
    "fmt"
    "io"
    "os"
    "os/signal"
    "path/filepath"
    "regexp"
    "strings"
    "syscall"

    "github.com/hashicorp/atlas-go/archive"
    "github.com/hashicorp/atlas-go/v1"
    "github.com/hashicorp/packer/helper/flag-kv"
    "github.com/hashicorp/packer/helper/flag-slice"
    "github.com/hashicorp/packer/template"

    "github.com/posener/complete"
)

// archiveTemplateEntry is the name the template always takes within the slug.
const archiveTemplateEntry = ".packer-template"

var (
    reName         = regexp.MustCompile("^[a-zA-Z0-9-_./]+$")
    errInvalidName = fmt.Errorf("Your build name can only contain these characters: %s", reName.String())
)

type PushCommand struct {
    Meta

    client *atlas.Client

    // For tests:
    uploadFn pushUploadFn
}

// pushUploadFn is the callback type used for tests to stub out the uploading
// logic of the push command.
type pushUploadFn func(
    io.Reader, *uploadOpts) (<-chan struct{}, <-chan error, error)

func (c *PushCommand) Run(args []string) int {
    var token string
    var message string
    var name string
    var create bool
    var sensitiveVars []string

    flags := c.Meta.FlagSet("push", FlagSetVars)
    flags.Usage = func() { c.Ui.Error(c.Help()) }
    flags.StringVar(&token, "token", "", "token")
    flags.StringVar(&message, "m", "", "message")
    flags.StringVar(&message, "message", "", "message")
    flags.StringVar(&name, "name", "", "name")
    flags.BoolVar(&create, "create", false, "create (deprecated)")
    flags.Var((*sliceflag.StringFlag)(&sensitiveVars), "sensitive", "")
    if err := flags.Parse(args); err != nil {
        return 1
    }

    if message != "" {
        c.Ui.Say("[DEPRECATED] -m/-message is deprecated and will be removed in a future Packer release")
    }

    args = flags.Args()
    if len(args) != 1 {
        flags.Usage()
        return 1
    }

    // Print deprecations
    if create {
        c.Ui.Error(fmt.Sprintf("The '-create' option is now the default and is no\n" +
            "longer used. It will be removed in the next version."))
    }

    // Parse the template
    tpl, err := template.ParseFile(args[0])
    if err != nil {
        c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
        return 1
    }

    // Get the core
    core, err := c.Meta.Core(tpl)
    if err != nil {
        c.Ui.Error(err.Error())
        return 1
    }
    push := core.Template.Push

    // If we didn't pass name from the CLI, use the template
    if name == "" {
        name = push.Name
    }

    // Validate some things
    if name == "" {
        c.Ui.Error(fmt.Sprintf(
            "The 'push' section must be specified in the template with\n" +
                "at least the 'name' option set. Alternatively, you can pass the\n" +
                "name parameter from the CLI."))
        return 1
    }

    if !reName.MatchString(name) {
        c.Ui.Error(errInvalidName.Error())
        return 1
    }

    // Determine our token
    if token == "" {
        token = push.Token
    }

    // Build our client
    defer func() { c.client = nil }()
    c.client = atlas.DefaultClient()
    if push.Address != "" {
        c.client, err = atlas.NewClient(push.Address)
        if err != nil {
            c.Ui.Error(fmt.Sprintf(
                "Error setting up API client: %s", err))
            return 1
        }
    }
    if token != "" {
        c.client.Token = token
    }

    // Build the archiving options
    var opts archive.ArchiveOpts
    opts.Include = push.Include
    opts.Exclude = push.Exclude
    opts.VCS = push.VCS
    opts.Extra = map[string]string{
        archiveTemplateEntry: args[0],
    }

    // Determine the path we're archiving. This logic is a bit complicated
    // as there are three possibilities:
    //
    //   1.) BaseDir is an absolute path, just use that.
    //
    //   2.) BaseDir is empty, so we use the directory of the template.
    //
    //   3.) BaseDir is relative, so we use the path relative to the directory
    //       of the template.
    //
    path := push.BaseDir
    if path == "" || !filepath.IsAbs(path) {
        tplPath, err := filepath.Abs(args[0])
        if err != nil {
            c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
            return 1
        }
        tplPath = filepath.Dir(tplPath)
        if path != "" {
            tplPath = filepath.Join(tplPath, path)
        }
        path, err = filepath.Abs(tplPath)
        if err != nil {
            c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
            return 1
        }
    }

    // Find the Atlas post-processors, if possible
    var atlasPPs []*template.PostProcessor
    for _, list := range tpl.PostProcessors {
        for _, pp := range list {
            if pp.Type == "atlas" {
                atlasPPs = append(atlasPPs, pp)
            }
        }
    }

    // Build the upload options
    var uploadOpts uploadOpts
    uploadOpts.Slug = name
    uploadOpts.Builds = make(map[string]*uploadBuildInfo)
    for _, b := range tpl.Builders {
        info := &uploadBuildInfo{Type: b.Type}
        // todo: remove post-migration
        if b.Type == "vagrant" {
            c.Ui.Error("\n-----------------------------------------------------------------------------------\n" +
                "Vagrant-related functionality has been moved from Terraform Enterprise into \n" +
                "its own product, Vagrant Cloud. For more information see " +
                "https://www.vagrantup.com/docs/vagrant-cloud/vagrant-cloud-migration.html\n" +
                "Please replace the Atlas post-processor with the Vagrant Cloud post-processor,\n" +
                "and see https://www.packer.io/docs/post-processors/vagrant-cloud.html for\n" +
                "more detail.\n" +
                "-----------------------------------------------------------------------------------\n")
            return 1
        }

        // Determine if we're artifacting this build
        for _, pp := range atlasPPs {
            if !pp.Skip(b.Name) {
                info.Artifact = true
                break
            }
        }

        uploadOpts.Builds[b.Name] = info
    }

    // Collect the variables from CLI args and any var files
    if sf := flags.Lookup("sensitive"); sf != nil {
        sfv := sf.Value.(*sliceflag.StringFlag)
        svars := []string(*sfv)
        uploadOpts.SensitiveVars = svars
    }

    uploadOpts.Vars = make(map[string]string)
    if vs := flags.Lookup("var"); vs != nil {
        f := vs.Value.(*kvflag.Flag)
        vars := map[string]string(*f)

        for k, v := range vars {
            uploadOpts.Vars[k] = v
        }
    }

    // Add the upload metadata
    metadata := make(map[string]interface{})
    if message != "" {
        metadata["message"] = message
    }
    metadata["template"] = tpl.RawContents
    metadata["template_name"] = filepath.Base(args[0])
    uploadOpts.Metadata = metadata

    // Warn about builds not having post-processors.
    var badBuilds []string
    for name, b := range uploadOpts.Builds {
        if b.Artifact {
            continue
        }

        badBuilds = append(badBuilds, name)
    }
    if len(badBuilds) > 0 {
        c.Ui.Error(fmt.Sprintf(
            "Warning! One or more of the builds in this template does not\n"+
                "have an Atlas post-processor. Artifacts from this template will\n"+
                "not appear in the Atlas artifact registry.\n\n"+
                "This is just a warning. Atlas will still build your template\n"+
                "and assume other post-processors are sending the artifacts where\n"+
                "they need to go.\n\n"+
                "Builds: %s\n\n", strings.Join(badBuilds, ", ")))
    }

    c.Ui.Message("\n-----------------------------------------------------------------------\n" +
        "Deprecation warning: The Packer and Artifact Registry features of Atlas\n" +
        "will no longer be actively developed or maintained and will be fully\n" +
        "decommissioned. Please see our guide on building immutable\n" +
        "infrastructure with Packer on CI/CD for ideas on implementing\n" +
        "these features yourself: https://www.packer.io/guides/packer-on-cicd/\n" +
        "-----------------------------------------------------------------------\n",
    )

    // Start the archiving process
    r, err := archive.CreateArchive(path, &opts)
    if err != nil {
        c.Ui.Error(fmt.Sprintf("Error archiving: %s", err))
        return 1
    }
    defer r.Close()

    // Start the upload process
    doneCh, uploadErrCh, err := c.upload(r, &uploadOpts)
    if err != nil {
        c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err))
        return 1
    }

    // Make a ctrl-C channel
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
    defer signal.Stop(sigCh)

    err = nil
    select {
    case err = <-uploadErrCh:
        err = fmt.Errorf("Error uploading: %s", err)
    case <-sigCh:
        err = fmt.Errorf("Push cancelled from Ctrl-C")
    case <-doneCh:
    }

    if err != nil {
        c.Ui.Error(err.Error())
        return 1
    }

    c.Ui.Say(fmt.Sprintf("Push successful to '%s'", name))
    return 0
}

func (*PushCommand) Help() string {
    helpText := `
Usage: packer push [options] TEMPLATE

  Push the given template and supporting files to a Packer build service such as
  Atlas.

  If a build configuration for the given template does not exist, it will be
  created automatically. If the build configuration already exists, a new
  version will be created with this template and the supporting files.

  Additional configuration options (such as the Atlas server URL and files to
  include) may be specified in the "push" section of the Packer template. Please
  see the online documentation for more information about these configurables.

Options:

  -name=<name>             The destination build in Atlas. This is in a format
                           "username/name".

  -token=<token>           The access token to use when uploading.

  -sensitive='var1,var2'   List of variables to mark as sensitive in the Atlas UI.

  -var 'key=value'         Variable for templates, can be used multiple times.

  -var-file=path           JSON file containing user variables.
`

    return strings.TrimSpace(helpText)
}

func (*PushCommand) Synopsis() string {
    return "push a template and supporting files to a Packer build service"
}

func (*PushCommand) AutocompleteArgs() complete.Predictor {
    return complete.PredictNothing
}

func (*PushCommand) AutocompleteFlags() complete.Flags {
    return complete.Flags{
        "-name":      complete.PredictNothing,
        "-token":     complete.PredictNothing,
        "-sensitive": complete.PredictNothing,
        "-var":       complete.PredictNothing,
        "-var-file":  complete.PredictNothing,
    }
}

func (c *PushCommand) upload(
    r *archive.Archive, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
    if c.uploadFn != nil {
        return c.uploadFn(r, opts)
    }

    // Separate the slug into the user and name components
    user, name, err := atlas.ParseSlug(opts.Slug)
    if err != nil {
        return nil, nil, fmt.Errorf("upload: %s", err)
    }

    // Get the build configuration
    bc, err := c.client.BuildConfig(user, name)
    if err != nil {
        if err == atlas.ErrNotFound {
            // Build configuration doesn't exist, attempt to create it
            bc, err = c.client.CreateBuildConfig(user, name)
        }

        if err != nil {
            return nil, nil, fmt.Errorf("upload: %s", err)
        }
    }

    // Build the version to send up
    version := atlas.BuildConfigVersion{
        User:   bc.User,
        Name:   bc.Name,
        Builds: make([]atlas.BuildConfigBuild, 0, len(opts.Builds)),
    }

    // Build the BuildVars struct
    buildVars := atlas.BuildVars{}
    for k, v := range opts.Vars {
        isSensitive := false
        for _, sensitiveVar := range opts.SensitiveVars {
            if sensitiveVar == k {
                isSensitive = true
                break
            }
        }
        buildVars = append(buildVars, atlas.BuildVar{
            Key:       k,
            Value:     v,
            Sensitive: isSensitive,
        })
    }

    for name, info := range opts.Builds {
        version.Builds = append(version.Builds, atlas.BuildConfigBuild{
            Name:     name,
            Type:     info.Type,
            Artifact: info.Artifact,
        })
    }

    // Start the upload
    doneCh, errCh := make(chan struct{}), make(chan error)
    go func() {
        err := c.client.UploadBuildConfigVersion(&version, opts.Metadata, buildVars, r, r.Size)
        if err != nil {
            errCh <- err
            return
        }

        close(doneCh)
    }()

    return doneCh, errCh, nil
}

type uploadOpts struct {
    URL           string
    Slug          string
    Builds        map[string]*uploadBuildInfo
    Metadata      map[string]interface{}
    Vars          map[string]string
    SensitiveVars []string
}

type uploadBuildInfo struct {
    Type     string
    Artifact bool
}
@@ -1,270 +0,0 @@
package command

import (
    "archive/tar"
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "path/filepath"
    "reflect"
    "sort"
    "testing"
)

func TestPush_noArgs(t *testing.T) {
    c := &PushCommand{Meta: testMeta(t)}
    code := c.Run(nil)
    if code != 1 {
        t.Fatalf("bad: %#v", code)
    }
}

func TestPush_multiArgs(t *testing.T) {
    c := &PushCommand{Meta: testMeta(t)}
    code := c.Run([]string{"one", "two"})
    if code != 1 {
        t.Fatalf("bad: %#v", code)
    }
}

func TestPush(t *testing.T) {
    var actual []string
    var actualOpts *uploadOpts
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        actual = testArchive(t, r)
        actualOpts = opts

        doneCh := make(chan struct{})
        close(doneCh)
        return doneCh, nil, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{filepath.Join(testFixture("push"), "template.json")}
    if code := c.Run(args); code != 0 {
        fatalCommand(t, c.Meta)
    }

    expected := []string{
        archiveTemplateEntry,
        "template.json",
    }

    if !reflect.DeepEqual(actual, expected) {
        t.Fatalf("bad: %#v", actual)
    }

    expectedBuilds := map[string]*uploadBuildInfo{
        "dummy": {
            Type: "dummy",
        },
    }
    if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
        t.Fatalf("bad: %#v", actualOpts.Builds)
    }
}

func TestPush_builds(t *testing.T) {
    var actualOpts *uploadOpts
    uploadFn := func(
        r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        actualOpts = opts

        doneCh := make(chan struct{})
        close(doneCh)
        return doneCh, nil, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{filepath.Join(testFixture("push-builds"), "template.json")}
    if code := c.Run(args); code != 0 {
        fatalCommand(t, c.Meta)
    }

    expectedBuilds := map[string]*uploadBuildInfo{
        "dummy": {
            Type:     "dummy",
            Artifact: true,
        },
        "foo": {
            Type: "dummy",
        },
    }
    if !reflect.DeepEqual(actualOpts.Builds, expectedBuilds) {
        t.Fatalf("bad: %#v", actualOpts.Builds)
    }
}

func TestPush_noName(t *testing.T) {
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        return nil, nil, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{filepath.Join(testFixture("push-no-name"), "template.json")}
    if code := c.Run(args); code != 1 {
        fatalCommand(t, c.Meta)
    }
}

func TestPush_cliName(t *testing.T) {
    var actual []string
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        actual = testArchive(t, r)

        doneCh := make(chan struct{})
        close(doneCh)
        return doneCh, nil, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{
        "-name=foo/bar",
        filepath.Join(testFixture("push-no-name"), "template.json"),
    }

    if code := c.Run(args); code != 0 {
        fatalCommand(t, c.Meta)
    }

    expected := []string{
        archiveTemplateEntry,
        "template.json",
    }

    if !reflect.DeepEqual(actual, expected) {
        t.Fatalf("bad: %#v", actual)
    }
}

func TestPush_uploadError(t *testing.T) {
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        return nil, nil, fmt.Errorf("bad")
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{filepath.Join(testFixture("push"), "template.json")}
    if code := c.Run(args); code != 1 {
        fatalCommand(t, c.Meta)
    }
}

func TestPush_uploadErrorCh(t *testing.T) {
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        errCh := make(chan error, 1)
        errCh <- fmt.Errorf("bad")
        return nil, errCh, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{filepath.Join(testFixture("push"), "template.json")}
    if code := c.Run(args); code != 1 {
        fatalCommand(t, c.Meta)
    }
}

func TestPush_vars(t *testing.T) {
    var actualOpts *uploadOpts
    uploadFn := func(r io.Reader, opts *uploadOpts) (<-chan struct{}, <-chan error, error) {
        actualOpts = opts

        doneCh := make(chan struct{})
        close(doneCh)
        return doneCh, nil, nil
    }

    c := &PushCommand{
        Meta:     testMeta(t),
        uploadFn: uploadFn,
    }

    args := []string{
        "-var", "name=foo/bar",
        "-var", "one=two",
        "-var-file", filepath.Join(testFixture("push-vars"), "vars.json"),
        "-var", "overridden=yes",
        "-sensitive", "super,secret",
        filepath.Join(testFixture("push-vars"), "template.json"),
    }
    if code := c.Run(args); code != 0 {
        fatalCommand(t, c.Meta)
    }

    if actualOpts.Slug != "foo/bar" {
        t.Fatalf("bad slug: %s", actualOpts.Slug)
    }

    expected := map[string]string{
        "bar":        "baz",
        "name":       "foo/bar",
        "null":       "",
        "one":        "two",
        "overridden": "yes",
        "super":      "this should be secret",
        "secret":     "this one too",
    }
    if !reflect.DeepEqual(actualOpts.Vars, expected) {
        t.Fatalf("bad vars: got %#v\n expected %#v\n", actualOpts.Vars, expected)
    }

    expected_sensitive := []string{"super", "secret"}
    if !reflect.DeepEqual(actualOpts.SensitiveVars, expected_sensitive) {
        t.Fatalf("bad vars: got %#v\n expected %#v\n", actualOpts.SensitiveVars, expected_sensitive)
    }
}

func testArchive(t *testing.T, r io.Reader) []string {
    // Finish the archiving process in-memory
    var buf bytes.Buffer
    if _, err := io.Copy(&buf, r); err != nil {
        t.Fatalf("err: %s", err)
    }

    gzipR, err := gzip.NewReader(&buf)
    if err != nil {
        t.Fatalf("err: %s", err)
    }
    tarR := tar.NewReader(gzipR)

    // Read all the entries
    result := make([]string, 0, 5)
    for {
        hdr, err := tarR.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            t.Fatalf("err: %s", err)
        }

        result = append(result, hdr.Name)
    }

    sort.Strings(result)
    return result
}
@@ -1,15 +0,0 @@
{
  "builders": [
    {"type": "dummy"},
    {"type": "dummy", "name": "foo"}
  ],

  "post-processors": [{
    "type": "atlas",
    "only": ["dummy"]
  }],

  "push": {
    "name": "foo/bar"
  }
}

@@ -1,3 +0,0 @@
{
  "builders": [{"type": "dummy"}]
}

@@ -1,11 +0,0 @@
{
  "variables": {
    "name": null
  },

  "builders": [{"type": "dummy"}],

  "push": {
    "name": "{{user `name`}}"
  }
}

@@ -1,7 +0,0 @@
{
  "null": null,
  "bar": "baz",
  "overridden": "no",
  "super": "this should be secret",
  "secret": "this one too"
}

@@ -1,7 +0,0 @@
{
  "builders": [{"type": "dummy"}],

  "push": {
    "name": "foo/bar"
  }
}
@@ -35,12 +35,6 @@ func init() {
        }, nil
    },

    "push": func() (cli.Command, error) {
        return &command.PushCommand{
            Meta: *CommandMeta,
        }, nil
    },

    "validate": func() (cli.Command, error) {
        return &command.ValidateCommand{
            Meta: *CommandMeta,
@@ -27,14 +27,6 @@ _packer () {
    '(-)*:files:_files -g "*.json"'
  )

  local -a push_arguments && push_arguments=(
    '-name=[(<name>) The destination build in Atlas.]'
    '-token=[(<token>) Access token to use to upload.]'
    '-var[("key=value") Variable for templates, can be used multiple times.]'
    '-var-file=[(path) JSON file containing user variables.]'
    '(-)*:files:_files -g "*.json"'
  )

  local -a validate_arguments && validate_arguments=(
    '-syntax-only[Only check syntax. Do not verify config of the template.]'
    '-except=[(foo,bar,baz) Validate all builds other than these]'

@@ -57,8 +49,6 @@ _packer () {
      _arguments -s -S : $build_arguments ;;
    inspect)
      _arguments -s -S : $inspect_arguments ;;
    push)
      _arguments -s -S : $push_arguments ;;
    validate)
      _arguments -s -S : $validate_arguments ;;
  esac
@@ -1,37 +0,0 @@
package atlas

import (
    "fmt"
)

const BuilderId = "packer.post-processor.atlas"

type Artifact struct {
    Name    string
    Type    string
    Version int
}

func (*Artifact) BuilderId() string {
    return BuilderId
}

func (a *Artifact) Files() []string {
    return nil
}

func (a *Artifact) Id() string {
    return fmt.Sprintf("%s/%s/%d", a.Name, a.Type, a.Version)
}

func (a *Artifact) String() string {
    return fmt.Sprintf("%s/%s (v%d)", a.Name, a.Type, a.Version)
}

func (*Artifact) State(name string) interface{} {
    return nil
}

func (a *Artifact) Destroy() error {
    return nil
}
@@ -1,284 +0,0 @@
package atlas

import (
    "fmt"
    "os"
    "strconv"
    "strings"

    "github.com/hashicorp/atlas-go/archive"
    "github.com/hashicorp/atlas-go/v1"
    "github.com/hashicorp/packer/common"
    "github.com/hashicorp/packer/helper/config"
    "github.com/hashicorp/packer/packer"
    "github.com/hashicorp/packer/template/interpolate"
    "github.com/mitchellh/mapstructure"
)

const (
    BuildEnvKey   = "ATLAS_BUILD_ID"
    CompileEnvKey = "ATLAS_COMPILE_ID"
)

// Artifacts can return a string for this state key and the post-processor
// will automatically use this as the type. The user's value overrides
// this if `artifact_type_override` is set to true.
const ArtifactStateType = "atlas.artifact.type"

// Artifacts can return a map[string]string for this state key and this
// post-processor will automatically merge it into the metadata for any
// uploaded artifact versions.
const ArtifactStateMetadata = "atlas.artifact.metadata"

type Config struct {
    common.PackerConfig `mapstructure:",squash"`

    Artifact     string
    Type         string `mapstructure:"artifact_type"`
    TypeOverride bool   `mapstructure:"artifact_type_override"`
    Metadata     map[string]string

    ServerAddr string `mapstructure:"atlas_url"`
    Token      string

    // This shouldn't ever be set outside of unit tests.
    Test bool `mapstructure:"test"`

    ctx        interpolate.Context
    user, name string
    buildId    int
    compileId  int
}

type PostProcessor struct {
    config Config
    client *atlas.Client
}

func (p *PostProcessor) Configure(raws ...interface{}) error {
    err := config.Decode(&p.config, &config.DecodeOpts{
        Interpolate:        true,
        InterpolateContext: &p.config.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{},
        },
    }, raws...)
    if err != nil {
        return err
    }

    required := map[string]*string{
        "artifact":      &p.config.Artifact,
        "artifact_type": &p.config.Type,
    }

    var errs *packer.MultiError
    for key, ptr := range required {
        if *ptr == "" {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("%s must be set", key))
        }
    }

    if errs != nil && len(errs.Errors) > 0 {
        return errs
    }

    p.config.user, p.config.name, err = atlas.ParseSlug(p.config.Artifact)
    if err != nil {
        return err
    }

    // If we have a build ID, save it
    if v := os.Getenv(BuildEnvKey); v != "" {
        raw, err := strconv.ParseInt(v, 0, 0)
        if err != nil {
            return fmt.Errorf(
                "Error parsing build ID: %s", err)
        }

        p.config.buildId = int(raw)
    }

    // If we have a compile ID, save it
    if v := os.Getenv(CompileEnvKey); v != "" {
        raw, err := strconv.ParseInt(v, 0, 0)
        if err != nil {
            return fmt.Errorf(
                "Error parsing compile ID: %s", err)
        }

        p.config.compileId = int(raw)
    }

    // Build the client
    p.client = atlas.DefaultClient()
    if p.config.ServerAddr != "" {
        p.client, err = atlas.NewClient(p.config.ServerAddr)
        if err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error initializing atlas client: %s", err))
            return errs
        }
    }
    if p.config.Token != "" {
        p.client.Token = p.config.Token
    }

    if !p.config.Test {
        // Verify the client
        if err := p.client.Verify(); err != nil {
            if err == atlas.ErrAuth {
                errs = packer.MultiErrorAppend(
                    errs, fmt.Errorf("Error connecting to atlas server, please check your ATLAS_TOKEN env: %s", err))
            } else {
                errs = packer.MultiErrorAppend(
                    errs, fmt.Errorf("Error initializing atlas client: %s", err))
            }
            return errs
        }
    }
    return nil
}

func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
    // todo: remove/reword after the migration
    if p.config.Type == "vagrant.box" {
        return nil, false, fmt.Errorf("Vagrant-related functionality has been moved from Terraform\n" +
            "Enterprise into its own product, Vagrant Cloud. For more information see\n" +
            "https://www.vagrantup.com/docs/vagrant-cloud/vagrant-cloud-migration.html\n" +
            "Please replace the Atlas post-processor with the Vagrant Cloud post-processor,\n" +
            "and see https://www.packer.io/docs/post-processors/vagrant-cloud.html for\n" +
            "more detail.\n")
    }

    ui.Message("\n-----------------------------------------------------------------------\n" +
        "Deprecation warning: The Packer and Artifact Registry features of Atlas\n" +
        "will no longer be actively developed or maintained and will be fully\n" +
        "decommissioned. Please see our guide on building immutable\n" +
        "infrastructure with Packer on CI/CD for ideas on implementing\n" +
        "these features yourself: https://www.packer.io/guides/packer-on-cicd/\n" +
        "-----------------------------------------------------------------------\n",
    )

    if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil {
        if err != atlas.ErrNotFound {
            return nil, false, fmt.Errorf(
                "Error finding artifact: %s", err)
        }

        // Artifact doesn't exist, create it
        ui.Message(fmt.Sprintf("Creating artifact: %s", p.config.Artifact))
        _, err = p.client.CreateArtifact(p.config.user, p.config.name)
        if err != nil {
            return nil, false, fmt.Errorf(
                "Error creating artifact: %s", err)
        }
    }

    opts := &atlas.UploadArtifactOpts{
        User:      p.config.user,
        Name:      p.config.name,
        Type:      p.config.Type,
        ID:        artifact.Id(),
        Metadata:  p.metadata(artifact),
        BuildID:   p.config.buildId,
        CompileID: p.config.compileId,
    }

    if fs := artifact.Files(); len(fs) > 0 {
        var archiveOpts archive.ArchiveOpts

        // We have files. We want to compress/upload them. If we have just
        // one file, then we use it as-is. Otherwise, we compress all of
        // them into a single file.
        var path string
        if len(fs) == 1 {
            path = fs[0]
        } else {
            path = longestCommonPrefix(fs)
            if path == "" {
                return nil, false, fmt.Errorf(
                    "No common prefix for archiving files: %v", fs)
            }

            // Modify the archive options to only include the files
            // that are in our file list.
            include := make([]string, len(fs))
            for i, f := range fs {
                include[i] = strings.Replace(f, path, "", 1)
            }
            archiveOpts.Include = include
        }

        r, err := archive.CreateArchive(path, &archiveOpts)
        if err != nil {
            return nil, false, fmt.Errorf(
                "Error archiving artifact: %s", err)
        }
        defer r.Close()

        opts.File = r
        opts.FileSize = r.Size
    }

    ui.Message(fmt.Sprintf("Uploading artifact (%d bytes)", opts.FileSize))
    var av *atlas.ArtifactVersion
    doneCh := make(chan struct{})
    errCh := make(chan error, 1)
    go func() {
        var err error
        av, err = p.client.UploadArtifact(opts)
        if err != nil {
            errCh <- err
            return
        }
        close(doneCh)
    }()

    select {
    case err := <-errCh:
        return nil, false, fmt.Errorf("Error uploading (%d bytes): %s", opts.FileSize, err)
    case <-doneCh:
    }

    return &Artifact{
        Name:    p.config.Artifact,
        Type:    p.config.Type,
        Version: av.Version,
    }, true, nil
}

func (p *PostProcessor) metadata(artifact packer.Artifact) map[string]string {
    var metadata map[string]string
    metadataRaw := artifact.State(ArtifactStateMetadata)
    if metadataRaw != nil {
        if err := mapstructure.Decode(metadataRaw, &metadata); err != nil {
            panic(err)
        }
    }

    if p.config.Metadata != nil {
        // If we have no extra metadata, just return as-is
        if metadata == nil {
            return p.config.Metadata
        }

        // Merge the metadata
        for k, v := range p.config.Metadata {
            metadata[k] = v
        }
    }

    return metadata
}

func (p *PostProcessor) artifactType(artifact packer.Artifact) string {
    if !p.config.TypeOverride {
        if v := artifact.State(ArtifactStateType); v != nil {
            return v.(string)
        }
    }

    return p.config.Type
}
@@ -1,154 +0,0 @@
package atlas

import (
    "os"
    "reflect"
    "testing"

    "github.com/hashicorp/packer/packer"
)

func TestPostProcessorConfigure(t *testing.T) {
    currentEnv := os.Getenv("ATLAS_TOKEN")
    os.Setenv("ATLAS_TOKEN", "")
    defer os.Setenv("ATLAS_TOKEN", currentEnv)

    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    if p.client == nil {
        t.Fatal("should have client")
    }
    if p.client.Token != "" {
        t.Fatal("should not have token")
    }
}

func TestPostProcessorConfigure_buildId(t *testing.T) {
    defer os.Setenv(BuildEnvKey, os.Getenv(BuildEnvKey))
    os.Setenv(BuildEnvKey, "5")

    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    if p.config.buildId != 5 {
        t.Fatalf("bad: %#v", p.config.buildId)
    }
}

func TestPostProcessorConfigure_compileId(t *testing.T) {
    defer os.Setenv(CompileEnvKey, os.Getenv(CompileEnvKey))
    os.Setenv(CompileEnvKey, "5")

    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    if p.config.compileId != 5 {
        t.Fatalf("bad: %#v", p.config.compileId)
    }
}

func TestPostProcessorMetadata(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    metadata := p.metadata(artifact)
    if len(metadata) > 0 {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorMetadata_artifact(t *testing.T) {
    config := validDefaults()
    config["metadata"] = map[string]string{
        "foo": "bar",
    }

    var p PostProcessor
    if err := p.Configure(config); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    artifact.StateValues = map[string]interface{}{
        ArtifactStateMetadata: map[interface{}]interface{}{
            "bar": "baz",
        },
    }

    metadata := p.metadata(artifact)
    expected := map[string]string{
        "foo": "bar",
        "bar": "baz",
    }
    if !reflect.DeepEqual(metadata, expected) {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorMetadata_config(t *testing.T) {
    config := validDefaults()
    config["metadata"] = map[string]string{
        "foo": "bar",
    }

    var p PostProcessor
    if err := p.Configure(config); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    metadata := p.metadata(artifact)
    expected := map[string]string{
        "foo": "bar",
    }
    if !reflect.DeepEqual(metadata, expected) {
        t.Fatalf("bad: %#v", metadata)
    }
}

func TestPostProcessorType(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    actual := p.artifactType(artifact)
    if actual != "foo" {
        t.Fatalf("bad: %#v", actual)
    }
}

func TestPostProcessorType_artifact(t *testing.T) {
    var p PostProcessor
    if err := p.Configure(validDefaults()); err != nil {
        t.Fatalf("err: %s", err)
    }

    artifact := new(packer.MockArtifact)
    artifact.StateValues = map[string]interface{}{
        ArtifactStateType: "bar",
    }
    actual := p.artifactType(artifact)
    if actual != "bar" {
        t.Fatalf("bad: %#v", actual)
    }
}

func validDefaults() map[string]interface{} {
    return map[string]interface{}{
        "artifact":      "mitchellh/test",
        "artifact_type": "foo",
        "test":          true,
    }
}
@@ -1,49 +0,0 @@
package atlas

import (
    "math"
    "path/filepath"
    "strings"
)

// longestCommonPrefix finds the longest common prefix for all the strings
// given as an argument, or returns the empty string if a prefix can't be
// found.
//
// This function just uses brute force instead of a more optimized algorithm.
func longestCommonPrefix(vs []string) string {
    var length int64
    // Find the shortest string
    var shortest string
    length = math.MaxUint32
    for _, v := range vs {
        if int64(len(v)) < length {
            shortest = v
            length = int64(len(v))
        }
    }

    // Now go through and find a prefix to all the strings using this
    // short string, which itself must contain the prefix.
    for i := len(shortest); i > 0; i-- {
        // We only care about prefixes with path seps
        if shortest[i-1] != filepath.Separator {
            continue
        }

        bad := false
        prefix := shortest[0:i]
        for _, v := range vs {
            if !strings.HasPrefix(v, prefix) {
                bad = true
                break
            }
        }

        if !bad {
            return prefix
        }
    }

    return ""
}
@@ -1,38 +0,0 @@
package atlas

import (
    "path/filepath"
    "testing"
)

func TestLongestCommonPrefix(t *testing.T) {
    sep := string(filepath.Separator)
    cases := []struct {
        Input  []string
        Output string
    }{
        {
            []string{"foo", "bar"},
            "",
        },
        {
            []string{"foo", "foobar"},
            "",
        },
        {
            []string{"foo" + sep, "foo" + sep + "bar"},
            "foo" + sep,
        },
        {
            []string{sep + "foo" + sep, sep + "bar"},
            sep,
        },
    }

    for _, tc := range cases {
        actual := longestCommonPrefix(tc.Input)
        if actual != tc.Output {
            t.Fatalf("bad: %#v\n\n%#v", actual, tc.Input)
        }
    }
}
@@ -1,353 +0,0 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor”

     means each individual or legal entity that creates, contributes to the
     creation of, or owns Covered Software.

1.2. “Contributor Version”

     means the combination of the Contributions of others (if any) used by a
     Contributor and that particular Contributor’s Contribution.

1.3. “Contribution”

     means Covered Software of a particular Contributor.

1.4. “Covered Software”

     means Source Code Form to which the initial Contributor has attached the
     notice in Exhibit A, the Executable Form of such Source Code Form, and
     Modifications of such Source Code Form, in each case including portions
     thereof.

1.5. “Incompatible With Secondary Licenses”
     means

     a. that the initial Contributor has attached the notice described in
        Exhibit B to the Covered Software; or

     b. that the Covered Software was made available under the terms of version
        1.1 or earlier of the License, but not also under the terms of a
        Secondary License.

1.6. “Executable Form”

     means any form of the work other than Source Code Form.

1.7. “Larger Work”

     means a work that combines Covered Software with other material, in a separate
     file or files, that is not Covered Software.

1.8. “License”

     means this document.

1.9. “Licensable”

     means having the right to grant, to the maximum extent possible, whether at the
     time of the initial grant or subsequently, any and all of the rights conveyed by
     this License.

1.10. “Modifications”

     means any of the following:

     a. any file in Source Code Form that results from an addition to, deletion
        from, or modification of the contents of Covered Software; or

     b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor

      means any patent claim(s), including without limitation, method, process,
      and apparatus claims, in any patent Licensable by such Contributor that
      would be infringed, but for the grant of the License, by the making,
      using, selling, offering for sale, having made, import, or transfer of
      either its Contributions or its Contributor Version.

1.12. “Secondary License”

      means either the GNU General Public License, Version 2.0, the GNU Lesser
      General Public License, Version 2.1, the GNU Affero General Public
      License, Version 3.0, or any later versions of those licenses.

1.13. “Source Code Form”

      means the form of the work preferred for making modifications.

1.14. “You” (or “Your”)

      means an individual or a legal entity exercising rights under this
      License. For legal entities, “You” includes any entity that controls, is
      controlled by, or is under common control with You. For purposes of this
      definition, “control” means (a) the power, direct or indirect, to cause
      the direction or management of such entity, whether by contract or
      otherwise, or (b) ownership of more than fifty percent (50%) of the
      outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

     Each Contributor hereby grants You a world-wide, royalty-free,
     non-exclusive license:

     a. under intellectual property rights (other than patent or trademark)
        Licensable by such Contributor to use, reproduce, make available,
        modify, display, perform, distribute, and otherwise exploit its
        Contributions, either on an unmodified basis, with Modifications, or as
        part of a Larger Work; and

     b. under Patent Claims of such Contributor to make, use, sell, offer for
        sale, have made, import, and otherwise transfer either its Contributions
        or its Contributor Version.

2.2. Effective Date

     The licenses granted in Section 2.1 with respect to any Contribution become
     effective for each Contribution on the date the Contributor first distributes
     such Contribution.

2.3. Limitations on Grant Scope

     The licenses granted in this Section 2 are the only rights granted under this
     License. No additional rights or licenses will be implied from the distribution
     or licensing of Covered Software under this License. Notwithstanding Section
     2.1(b) above, no patent license is granted by a Contributor:

     a. for any code that a Contributor has removed from Covered Software; or

     b. for infringements caused by: (i) Your and any other third party’s
        modifications of Covered Software, or (ii) the combination of its
        Contributions with other software (except as part of its Contributor
        Version); or

     c. under Patent Claims infringed by Covered Software in the absence of its
        Contributions.

     This License does not grant any rights in the trademarks, service marks, or
     logos of any Contributor (except as may be necessary to comply with the
     notice requirements in Section 3.4).

2.4. Subsequent Licenses

     No Contributor makes additional grants as a result of Your choice to
     distribute the Covered Software under a subsequent version of this License
     (see Section 10.2) or under the terms of a Secondary License (if permitted
     under the terms of Section 3.3).

2.5. Representation

     Each Contributor represents that the Contributor believes its Contributions
     are its original creation(s) or it has sufficient rights to grant the
     rights to its Contributions conveyed by this License.

2.6. Fair Use

     This License is not intended to limit any rights You have under applicable
     copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
     Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

     All distribution of Covered Software in Source Code Form, including any
     Modifications that You create or to which You contribute, must be under the
     terms of this License. You must inform recipients that the Source Code Form
     of the Covered Software is governed by the terms of this License, and how
     they can obtain a copy of this License. You may not attempt to alter or
     restrict the recipients’ rights in the Source Code Form.

3.2. Distribution of Executable Form

     If You distribute Covered Software in Executable Form then:

     a. such Covered Software must also be made available in Source Code Form,
        as described in Section 3.1, and You must inform recipients of the
        Executable Form how they can obtain a copy of such Source Code Form by
        reasonable means in a timely manner, at a charge no more than the cost
        of distribution to the recipient; and

     b. You may distribute such Executable Form under the terms of this License,
        or sublicense it under different terms, provided that the license for
        the Executable Form does not attempt to limit or alter the recipients’
        rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

     You may create and distribute a Larger Work under terms of Your choice,
     provided that You also comply with the requirements of this License for the
     Covered Software. If the Larger Work is a combination of Covered Software
     with a work governed by one or more Secondary Licenses, and the Covered
     Software is not Incompatible With Secondary Licenses, this License permits
     You to additionally distribute such Covered Software under the terms of
     such Secondary License(s), so that the recipient of the Larger Work may, at
     their option, further distribute the Covered Software under the terms of
     either this License or such Secondary License(s).

3.4. Notices

     You may not remove or alter the substance of any license notices (including
     copyright notices, patent notices, disclaimers of warranty, or limitations
     of liability) contained within the Source Code Form of the Covered
     Software, except that You may alter any license notices to the extent
     required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

     You may choose to offer, and to charge a fee for, warranty, support,
     indemnity or liability obligations to one or more recipients of Covered
     Software. However, You may do so only on Your own behalf, and not on behalf
     of any Contributor. You must make it absolutely clear that any such
     warranty, support, indemnity, or liability obligation is offered by You
     alone, and You hereby agree to indemnify every Contributor for any
     liability incurred by such Contributor as a result of warranty, support,
     indemnity or liability terms You offer. You may include additional
     disclaimers of warranty and limitations of liability specific to any
     jurisdiction.

4. Inability to Comply Due to Statute or Regulation

   If it is impossible for You to comply with any of the terms of this License
   with respect to some or all of the Covered Software due to statute, judicial
   order, or regulation then You must: (a) comply with the terms of this License
   to the maximum extent possible; and (b) describe the limitations and the code
   they affect. Such description must be placed in a text file included with all
   distributions of the Covered Software under this License. Except to the
   extent prohibited by statute or regulation, such description must be
   sufficiently detailed for a recipient of ordinary skill to be able to
   understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
     fail to comply with any of its terms. However, if You become compliant,
     then the rights granted under this License from a particular Contributor
     are reinstated (a) provisionally, unless and until such Contributor
     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
     if such Contributor fails to notify You of the non-compliance by some
     reasonable means prior to 60 days after You have come back into compliance.
     Moreover, Your grants from a particular Contributor are reinstated on an
     ongoing basis if such Contributor notifies You of the non-compliance by
     some reasonable means, this is the first time You have received notice of
     non-compliance with this License from such Contributor, and You become
     compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
     infringement claim (excluding declaratory judgment actions, counter-claims,
     and cross-claims) alleging that a Contributor Version directly or
     indirectly infringes any patent, then the rights granted to You by any and
     all Contributors for the Covered Software under Section 2.1 of this License
     shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
     license agreements (excluding distributors and resellers) which have been
     validly granted by You or Your distributors under this License prior to
     termination shall survive termination.

6. Disclaimer of Warranty

   Covered Software is provided under this License on an “as is” basis, without
   warranty of any kind, either expressed, implied, or statutory, including,
   without limitation, warranties that the Covered Software is free of defects,
   merchantable, fit for a particular purpose or non-infringing. The entire
   risk as to the quality and performance of the Covered Software is with You.
   Should any Covered Software prove defective in any respect, You (not any
   Contributor) assume the cost of any necessary servicing, repair, or
   correction. This disclaimer of warranty constitutes an essential part of this
   License. No use of any Covered Software is authorized under this License
   except under this disclaimer.

7. Limitation of Liability

   Under no circumstances and under no legal theory, whether tort (including
   negligence), contract, or otherwise, shall any Contributor, or anyone who
   distributes Covered Software as permitted above, be liable to You for any
   direct, indirect, special, incidental, or consequential damages of any
   character including, without limitation, damages for lost profits, loss of
   goodwill, work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses, even if such party shall have been
   informed of the possibility of such damages. This limitation of liability
   shall not apply to liability for death or personal injury resulting from such
   party’s negligence to the extent applicable law prohibits such limitation.
   Some jurisdictions do not allow the exclusion or limitation of incidental or
   consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

   Any litigation relating to this License may be brought only in the courts of
   a jurisdiction where the defendant maintains its principal place of business
   and such litigation shall be governed by laws of that jurisdiction, without
   reference to its conflict-of-law provisions. Nothing in this Section shall
   prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

   This License represents the complete agreement concerning the subject matter
   hereof. If any provision of this License is held to be unenforceable, such
   provision shall be reformed only to the extent necessary to make it
   enforceable. Any law or regulation which provides that the language of a
   contract shall be construed against the drafter shall not be used to construe
   this License against a Contributor.


10. Versions of the License

10.1. New Versions

      Mozilla Foundation is the license steward. Except as provided in Section
      10.3, no one other than the license steward has the right to modify or
      publish new versions of this License. Each version will be given a
      distinguishing version number.

10.2. Effect of New Versions

      You may distribute the Covered Software under the terms of the version of
      the License under which You originally received the Covered Software, or
      under the terms of any subsequent version published by the license
      steward.

10.3. Modified Versions

      If you create software not governed by this License, and you want to
      create a new license for such software, you may create and use a modified
      version of this License if you rename the license and remove any
      references to the name of the license steward (except to note that such
      modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

      If You choose to distribute Source Code Form that is Incompatible With
      Secondary Licenses under the terms of this version of the License, the
      notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

      This Source Code Form is subject to the
      terms of the Mozilla Public License, v.
      2.0. If a copy of the MPL was not
      distributed with this file, You can
      obtain one at
      http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

      This Source Code Form is “Incompatible
      With Secondary Licenses”, as defined by
      the Mozilla Public License, v. 2.0.
@ -1,517 +0,0 @@
// archive is a package that helps create archives in a format that
// Atlas expects with its various upload endpoints.
package archive

import (
	"archive/tar"
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// Archive is the resulting archive. The archive data is generally streamed
// so the io.ReadCloser can be used to backpressure the archive progress
// and avoid memory pressure.
type Archive struct {
	io.ReadCloser

	Size     int64
	Metadata map[string]string
}

// ArchiveOpts are the options for defining how the archive will be built.
type ArchiveOpts struct {
	// Exclude and Include are filters of files to include/exclude in
	// the archive when creating it from a directory. These filters should
	// be relative to the packaging directory and should be basic glob
	// patterns.
	Exclude []string
	Include []string

	// Extra is a mapping of extra files to include within the archive. The
	// key should be the path within the archive and the value should be
	// an absolute path to the file to put into the archive. These extra
	// files will override any other files in the archive.
	Extra map[string]string

	// VCS, if true, will detect and use a VCS system to determine what
	// files to include in the archive.
	VCS bool
}

// IsSet says whether any options were set.
func (o *ArchiveOpts) IsSet() bool {
	return len(o.Exclude) > 0 || len(o.Include) > 0 || o.VCS
}

// Constants related to setting special values for Extra in ArchiveOpts.
const (
	// ExtraEntryDir just creates the Extra key as a directory entry.
	ExtraEntryDir = ""
)

// CreateArchive takes the given path and ArchiveOpts and archives it.
//
// The archive will be fully completed and put into a temporary file.
// This must be done to retrieve the content length of the archive which
// is needed for almost all operations involving archives with Atlas. Because
// of this, sufficient disk space will be required to buffer the archive.
func CreateArchive(path string, opts *ArchiveOpts) (*Archive, error) {
	log.Printf("[INFO] creating archive from %s", path)

	// Dereference any symlinks and determine the real path and info
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		path, fi, err = readLinkFull(path, fi)
		if err != nil {
			return nil, err
		}
	}

	// Windows
	path = filepath.ToSlash(path)

	// Direct file paths cannot have archive options
	if !fi.IsDir() && opts.IsSet() {
		return nil, fmt.Errorf(
			"options such as exclude, include, and VCS can't be set when " +
				"the path is a file.")
	}

	if fi.IsDir() {
		return archiveDir(path, opts)
	} else {
		return archiveFile(path)
	}
}

func archiveFile(path string) (*Archive, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	if _, err := gzip.NewReader(f); err == nil {
		// Reset the read offset for future reading
		if _, err := f.Seek(0, 0); err != nil {
			f.Close()
			return nil, err
		}

		// Get the file info for the size
		fi, err := f.Stat()
		if err != nil {
			f.Close()
			return nil, err
		}

		// This is a gzip file, let it through.
		return &Archive{ReadCloser: f, Size: fi.Size()}, nil
	}

	// Close the file, no use for it anymore
	f.Close()

	// We have a single file that is not gzipped. Compress it.
	path, err = filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	// Act like we're compressing a directory, but only include this one
	// file.
	return archiveDir(filepath.Dir(path), &ArchiveOpts{
		Include: []string{filepath.Base(path)},
	})
}

func archiveDir(root string, opts *ArchiveOpts) (*Archive, error) {

	var vcsInclude []string
	var metadata map[string]string
	if opts.VCS {
		var err error

		if err = vcsPreflight(root); err != nil {
			return nil, err
		}

		vcsInclude, err = vcsFiles(root)
		if err != nil {
			return nil, err
		}

		metadata, err = vcsMetadata(root)
		if err != nil {
			return nil, err
		}
	}

	// Make sure the root path is absolute
	root, err := filepath.Abs(root)
	if err != nil {
		return nil, err
	}

	// Create the temporary file that we'll send the archive data to.
	archiveF, err := ioutil.TempFile("", "atlas-archive")
	if err != nil {
		return nil, err
	}

	// Create the wrapper for the result which will automatically
	// remove the temporary file on close.
	archiveWrapper := &readCloseRemover{F: archiveF}

	// Buffer the writer so that we can push as much data to disk at
	// a time as possible. 4M should be good.
	bufW := bufio.NewWriterSize(archiveF, 4096*1024)

	// Gzip compress all the output data
	gzipW := gzip.NewWriter(bufW)

	// Tar the file contents
	tarW := tar.NewWriter(gzipW)

	// First, walk the path and do the normal files
	werr := filepath.Walk(root, copyDirWalkFn(
		tarW, root, "", opts, vcsInclude))
	if werr == nil {
		// If that succeeded, handle the extra files
		werr = copyExtras(tarW, opts.Extra)
	}

	// Attempt to close all the things. If we get an error on the way
	// and we haven't had an error yet, then record that as the critical
	// error. But we still try to close everything.

	// Close the tar writer
	if err := tarW.Close(); err != nil && werr == nil {
		werr = err
	}

	// Close the gzip writer
	if err := gzipW.Close(); err != nil && werr == nil {
		werr = err
	}

	// Flush the buffer
	if err := bufW.Flush(); err != nil && werr == nil {
		werr = err
	}

	// If we had an error, then close the file (removing it) and
	// return the error.
	if werr != nil {
		archiveWrapper.Close()
		return nil, werr
	}

	// Seek to the beginning
	if _, err := archiveWrapper.F.Seek(0, 0); err != nil {
		archiveWrapper.Close()
		return nil, err
	}

	// Get the file information so we can get the size
	fi, err := archiveWrapper.F.Stat()
	if err != nil {
		archiveWrapper.Close()
		return nil, err
	}

	return &Archive{
		ReadCloser: archiveWrapper,
		Size:       fi.Size(),
		Metadata:   metadata,
	}, nil
}

func copyDirWalkFn(
	tarW *tar.Writer, root string, prefix string,
	opts *ArchiveOpts, vcsInclude []string) filepath.WalkFunc {

	errFunc := func(err error) filepath.WalkFunc {
		return func(string, os.FileInfo, error) error {
			return err
		}
	}

	// Windows
	root = filepath.ToSlash(root)

	var includeMap map[string]struct{}

	// If we have an include/exclude pattern set, then setup the lookup
	// table to determine what we want to include.
	if opts != nil && len(opts.Include) > 0 {
		includeMap = make(map[string]struct{})
		for _, pattern := range opts.Include {
			matches, err := filepath.Glob(filepath.Join(root, pattern))
			if err != nil {
				return errFunc(fmt.Errorf(
					"error checking include glob '%s': %s",
					pattern, err))
			}

			for _, path := range matches {
				// Windows
				path = filepath.ToSlash(path)
				subpath, err := filepath.Rel(root, path)
				subpath = filepath.ToSlash(subpath)

				if err != nil {
					return errFunc(err)
				}

				for {
					includeMap[subpath] = struct{}{}
					subpath = filepath.Dir(subpath)
					if subpath == "." {
						break
					}
				}
			}
		}
	}

	return func(path string, info os.FileInfo, err error) error {
		path = filepath.ToSlash(path)

		if err != nil {
			return err
		}

		// Get the relative path from the path since it contains the root
		// plus the path.
		subpath, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		if subpath == "." {
			return nil
		}
		if prefix != "" {
			subpath = filepath.Join(prefix, subpath)
		}
		// Windows
		subpath = filepath.ToSlash(subpath)

		// If we have a list of VCS files, check that first
		skip := false
		if len(vcsInclude) > 0 {
			skip = true
			for _, f := range vcsInclude {
				if f == subpath {
					skip = false
					break
				}

				if info.IsDir() && strings.HasPrefix(f, subpath+"/") {
					skip = false
					break
				}
			}
		}

		// If include is present, we only include what is listed
		if len(includeMap) > 0 {
			if _, ok := includeMap[subpath]; !ok {
				skip = true
			}
		}

		// If exclude, it is one last gate to excluding files
		if opts != nil {
			for _, exclude := range opts.Exclude {
				match, err := filepath.Match(exclude, subpath)
				if err != nil {
					return err
				}
				if match {
					skip = true
					break
				}
			}
		}

		// If we have to skip this file, then skip it, properly skipping
		// children if we're a directory.
		if skip {
			if info.IsDir() {
				return filepath.SkipDir
			}

			return nil
		}

		// If this is a symlink, then we need to get the symlink target
		// rather than the symlink itself.
		if info.Mode()&os.ModeSymlink != 0 {
			target, info, err := readLinkFull(path, info)
			if err != nil {
				return err
			}

			// Copy the concrete entry for this path. This will either
			// be the file itself or just a directory entry.
			if err := copyConcreteEntry(tarW, subpath, target, info); err != nil {
				return err
			}

			if info.IsDir() {
				return filepath.Walk(target, copyDirWalkFn(
					tarW, target, subpath, opts, vcsInclude))
			}
			// return now so that we don't try to copy twice
			return nil
		}

		return copyConcreteEntry(tarW, subpath, path, info)
	}
}

func copyConcreteEntry(
	tarW *tar.Writer, entry string,
	path string, info os.FileInfo) error {
	// Windows
	path = filepath.ToSlash(path)

	// Build the file header for the tar entry
	header, err := tar.FileInfoHeader(info, path)
	if err != nil {
		return fmt.Errorf(
			"failed creating archive header: %s", path)
	}

	// Modify the header to properly be the full entry name
	header.Name = entry
	if info.IsDir() {
		header.Name += "/"
	}

	// Write the header first to the archive.
	if err := tarW.WriteHeader(header); err != nil {
		return fmt.Errorf(
			"failed writing archive header: %s", path)
	}

	// If it is a directory, then we're done (no body to write)
	if info.IsDir() {
		return nil
	}

	// Open the real file to write the data
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf(
			"failed opening file '%s' to write compressed archive.", path)
	}
	defer f.Close()

	if _, err = io.Copy(tarW, f); err != nil {
		return fmt.Errorf(
			"failed copying file to archive: %s, %s", path, err)
	}

	return nil
}

func copyExtras(w *tar.Writer, extra map[string]string) error {
	var tmpDir string
	defer func() {
		if tmpDir != "" {
			os.RemoveAll(tmpDir)
		}
	}()

	for entry, path := range extra {
		// If the path is empty, then we set it to a generic empty directory
		if path == "" {
			// If tmpDir is still empty, then we create an empty dir
			if tmpDir == "" {
				td, err := ioutil.TempDir("", "archive")
				if err != nil {
					return err
				}

				tmpDir = td
			}

			path = tmpDir
		}

		info, err := os.Stat(path)
		if err != nil {
			return err
		}

		// No matter what, write the entry. If this is a directory,
		// it'll just write the directory header.
		if err := copyConcreteEntry(w, entry, path, info); err != nil {
			return err
		}

		// If this is a directory, then we walk the internal contents
		// and copy those as well.
		if info.IsDir() {
			err := filepath.Walk(path, copyDirWalkFn(
				w, path, entry, nil, nil))
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func readLinkFull(path string, info os.FileInfo) (string, os.FileInfo, error) {
	target, err := filepath.EvalSymlinks(path)
	if err != nil {
		return "", nil, err
	}

	target, err = filepath.Abs(target)
	if err != nil {
		return "", nil, err
	}

	fi, err := os.Lstat(target)
	if err != nil {
		return "", nil, err
	}

	return target, fi, nil
}

// readCloseRemover is an io.ReadCloser implementation that will remove
// the file on Close(). We use this to clean up our temporary file for
// the archive.
type readCloseRemover struct {
	F *os.File
}

func (r *readCloseRemover) Read(p []byte) (int, error) {
	return r.F.Read(p)
}

func (r *readCloseRemover) Close() error {
	// First close the file
	err := r.F.Close()

	// Next make sure to remove it, or at least try, regardless of error
	// above.
	os.Remove(r.F.Name())

	return err
}
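For anyone reimplementing the push workflow on their own CI/CD, this removed archive package was driven roughly as follows. A minimal sketch against the exported API above; the template directory and file paths are illustrative, and the ".packer-template" entry name matches the archiveTemplateEntry constant the push command used:

	// Sketch: package a template directory the way `packer push` did,
	// honoring the VCS file list (e.g. `git ls-files`) to decide what to include.
	a, err := archive.CreateArchive("./my-template-dir", &archive.ArchiveOpts{
		VCS: true,
		Extra: map[string]string{
			".packer-template": "/absolute/path/to/template.json",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close() // removes the temporary file backing the archive

	// a.Size is the content length the upload endpoints require;
	// a.Metadata carries the VCS branch/commit/remote information.
	log.Printf("archive is %d bytes", a.Size)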
@ -1,365 +0,0 @@
package archive

import (
	"bufio"
	"bytes"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	version "github.com/hashicorp/go-version"
)

// VCS is a struct that explains how to get the file list for a given
// VCS.
type VCS struct {
	Name string

	// Detect is a list of files/folders that if they exist, signal that
	// this VCS is the VCS in use.
	Detect []string

	// Files returns the files that are under version control for the
	// given path.
	Files VCSFilesFunc

	// Metadata returns arbitrary metadata about the underlying VCS for the
	// given path.
	Metadata VCSMetadataFunc

	// Preflight is a function to run before looking for VCS files.
	Preflight VCSPreflightFunc
}

// VCSList is the list of VCS we recognize.
var VCSList = []*VCS{
	&VCS{
		Name:      "git",
		Detect:    []string{".git/"},
		Preflight: gitPreflight,
		Files:     vcsFilesCmd("git", "ls-files"),
		Metadata:  gitMetadata,
	},
	&VCS{
		Name:   "hg",
		Detect: []string{".hg/"},
		Files:  vcsTrimCmd(vcsFilesCmd("hg", "locate", "-f", "--include", ".")),
	},
	&VCS{
		Name:   "svn",
		Detect: []string{".svn/"},
		Files:  vcsFilesCmd("svn", "ls"),
	},
}

// VCSFilesFunc is the callback invoked to return the files in the VCS.
//
// The return value should be paths relative to the given path.
type VCSFilesFunc func(string) ([]string, error)

// VCSMetadataFunc is the callback invoked to get arbitrary information about
// the current VCS.
//
// The return value should be a map of key-value pairs.
type VCSMetadataFunc func(string) (map[string]string, error)

// VCSPreflightFunc is a function that runs before VCS detection to be
// configured by the user. It may be used to check if pre-requisites (like the
// actual VCS) are installed or that a program is at the correct version. If an
// error is returned, the VCS will not be processed and the error will be
// returned up the stack.
//
// The given argument is the path where the VCS is running.
type VCSPreflightFunc func(string) error

// vcsDetect detects the VCS that is used for path.
func vcsDetect(path string) (*VCS, error) {
	dir := path
	for {
		for _, v := range VCSList {
			for _, f := range v.Detect {
				check := filepath.Join(dir, f)
				if _, err := os.Stat(check); err == nil {
					return v, nil
				}
			}
		}
		lastDir := dir
		dir = filepath.Dir(dir)
		if dir == lastDir {
			break
		}
	}

	return nil, fmt.Errorf("no VCS found for path: %s", path)
}

// vcsPreflight runs the preflight check, if any, for the VCS at the given
// directory path.
func vcsPreflight(path string) error {
	vcs, err := vcsDetect(path)
	if err != nil {
		return fmt.Errorf("error detecting VCS: %s", err)
	}

	if vcs.Preflight != nil {
		return vcs.Preflight(path)
	}

	return nil
}

// vcsFiles returns the files for the VCS directory path.
func vcsFiles(path string) ([]string, error) {
	vcs, err := vcsDetect(path)
	if err != nil {
		return nil, fmt.Errorf("error detecting VCS: %s", err)
	}

	if vcs.Files != nil {
		return vcs.Files(path)
	}

	return nil, nil
}

// vcsFilesCmd creates a Files-compatible function that reads the files
// by executing the command in the repository path and returning each
// line in stdout.
func vcsFilesCmd(args ...string) VCSFilesFunc {
	return func(path string) ([]string, error) {
		var stderr, stdout bytes.Buffer

		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		cmd.Stdout = &stdout
		cmd.Stderr = &stderr
		if err := cmd.Run(); err != nil {
			return nil, fmt.Errorf(
				"error executing %s: %s",
				strings.Join(args, " "),
				err)
		}

		// Read each line of output as a path
		result := make([]string, 0, 100)
		scanner := bufio.NewScanner(&stdout)
		for scanner.Scan() {
			result = append(result, scanner.Text())
		}

		// Always use *nix-style paths (for Windows)
		for idx, value := range result {
			result[idx] = filepath.ToSlash(value)
		}

		return result, nil
	}
}

// vcsTrimCmd trims the prefix from the paths returned by another VCSFilesFunc.
// This should be used to wrap another function if the return value is known
// to have full paths rather than relative paths.
func vcsTrimCmd(f VCSFilesFunc) VCSFilesFunc {
	return func(path string) ([]string, error) {
		absPath, err := filepath.Abs(path)
		if err != nil {
			return nil, fmt.Errorf(
				"error expanding VCS path: %s", err)
		}

		// Now that we have the root path, get the inner files
		fs, err := f(path)
		if err != nil {
			return nil, err
		}

		// Trim the root path from the files
		result := make([]string, 0, len(fs))
		for _, f := range fs {
			if !strings.HasPrefix(f, absPath) {
				continue
			}

			f, err = filepath.Rel(absPath, f)
			if err != nil {
				return nil, fmt.Errorf(
					"error determining path: %s", err)
			}

			result = append(result, f)
		}

		return result, nil
	}
}

// vcsMetadata returns the metadata for the VCS directory path.
func vcsMetadata(path string) (map[string]string, error) {
	vcs, err := vcsDetect(path)
	if err != nil {
		return nil, fmt.Errorf("error detecting VCS: %s", err)
	}

	if vcs.Metadata != nil {
		return vcs.Metadata(path)
	}

	return nil, nil
}

const ignorableDetachedHeadError = "HEAD is not a symbolic ref"

// gitBranch gets and returns the current git branch for the Git repository
// at the given path. It is assumed that the VCS is git.
func gitBranch(path string) (string, error) {
	var stderr, stdout bytes.Buffer

	cmd := exec.Command("git", "symbolic-ref", "--short", "HEAD")
	cmd.Dir = path
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		if strings.Contains(stderr.String(), ignorableDetachedHeadError) {
			return "", nil
		} else {
			return "",
				fmt.Errorf("error getting git branch: %s\nstdout: %s\nstderr: %s",
					err, stdout.String(), stderr.String())
		}
	}

	branch := strings.TrimSpace(stdout.String())

	return branch, nil
}

// gitCommit gets the SHA of the latest commit for the Git repository at the
// given path. It is assumed that the VCS is git.
func gitCommit(path string) (string, error) {
	var stderr, stdout bytes.Buffer

	cmd := exec.Command("git", "log", "-n1", "--pretty=format:%H")
	cmd.Dir = path
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("error getting git commit: %s\nstdout: %s\nstderr: %s",
			err, stdout.String(), stderr.String())
	}

	commit := strings.TrimSpace(stdout.String())

	return commit, nil
}

// gitRemotes gets and returns a map of all remotes for the Git repository. The
// map key is the name of the remote of the format "remote.NAME" and the value
// is the endpoint for the remote. It is assumed that the VCS is git.
func gitRemotes(path string) (map[string]string, error) {
	var stderr, stdout bytes.Buffer

	cmd := exec.Command("git", "remote", "-v")
	cmd.Dir = path
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("error getting git remotes: %s\nstdout: %s\nstderr: %s",
			err, stdout.String(), stderr.String())
	}

	// Read each line of output as a remote
	result := make(map[string]string)
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		split := strings.Split(line, "\t")

		if len(split) < 2 {
			return nil, fmt.Errorf("invalid response from git remote: %s", stdout.String())
		}

		remote := fmt.Sprintf("remote.%s", strings.TrimSpace(split[0]))
		if _, ok := result[remote]; !ok {
			// https://github.com/foo/bar.git (fetch) #=> https://github.com/foo/bar.git
			urlSplit := strings.Split(split[1], " ")
			result[remote] = strings.TrimSpace(urlSplit[0])
		}
	}

	return result, nil
}

// gitPreflight is the pre-flight command that runs for Git-based VCSs
func gitPreflight(path string) error {
	var stderr, stdout bytes.Buffer

	cmd := exec.Command("git", "--version")
	cmd.Dir = path
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("error getting git version: %s\nstdout: %s\nstderr: %s",
			err, stdout.String(), stderr.String())
	}

	// Check if the output is valid
	output := strings.Split(strings.TrimSpace(stdout.String()), " ")
	if len(output) < 1 {
		log.Printf("[WARN] could not extract version output from Git")
		return nil
	}

	// Parse the version
	gitv, err := version.NewVersion(output[len(output)-1])
	if err != nil {
		log.Printf("[WARN] could not parse version output from Git")
		return nil
	}

	constraint, err := version.NewConstraint("> 1.8")
	if err != nil {
		log.Printf("[WARN] could not create version constraint to check")
		return nil
	}
	if !constraint.Check(gitv) {
		return fmt.Errorf("git version (%s) is too old, please upgrade", gitv.String())
	}

	return nil
}

// gitMetadata is the function to parse and return Git metadata
func gitMetadata(path string) (map[string]string, error) {
	// Future-self note: Git is NOT threadsafe, so we cannot run these
	// operations in go routines or else you're going to have a really really
	// bad day and Panda.State == "Sad" :(

	branch, err := gitBranch(path)
	if err != nil {
		return nil, err
	}

	commit, err := gitCommit(path)
	if err != nil {
		return nil, err
	}

	remotes, err := gitRemotes(path)
	if err != nil {
		return nil, err
	}

	// Make the return result (we already know the size)
	result := make(map[string]string, 2+len(remotes))

	result["branch"] = branch
	result["commit"] = commit
	for remote, value := range remotes {
		result[remote] = value
	}

	return result, nil
}
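Because the helpers above are unexported, extending VCS detection meant appending to VCSList from within the archive package itself. A sketch, assuming package-internal code; the Fossil marker file and file-listing command are illustrative assumptions, not part of the original list:

	// Sketch: teach the archiver about one more VCS by appending to VCSList.
	VCSList = append(VCSList, &VCS{
		Name:   "fossil",
		Detect: []string{".fslckout"},
		Files:  vcsFilesCmd("fossil", "ls"),
	})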
@ -1,164 +0,0 @@
package atlas

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
)

// appWrapper is the API wrapper since the server wraps the resulting object.
type appWrapper struct {
	Application *App `json:"application"`
}

// App represents a single instance of an application on the Atlas server.
type App struct {
	// User is the namespace (username or organization) under which the
	// Atlas application resides
	User string `json:"username"`

	// Name is the name of the application
	Name string `json:"name"`
}

// Slug returns the slug format for this App (User/Name)
func (a *App) Slug() string {
	return fmt.Sprintf("%s/%s", a.User, a.Name)
}

// App gets the App by the given user space and name. In the event the App is
// not found (404), or for any other non-200 responses, an error is returned.
func (c *Client) App(user, name string) (*App, error) {
	log.Printf("[INFO] getting application %s/%s", user, name)

	endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s", user, name)
	request, err := c.Request("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var app App
	if err := decodeJSON(response, &app); err != nil {
		return nil, err
	}

	return &app, nil
}

// CreateApp creates a new App under the given user with the given name. If the
// App is created successfully, it is returned. If the server returns any
// errors, an error is returned.
func (c *Client) CreateApp(user, name string) (*App, error) {
	log.Printf("[INFO] creating application %s/%s", user, name)

	body, err := json.Marshal(&appWrapper{&App{
		User: user,
		Name: name,
	}})
	if err != nil {
		return nil, err
	}

	endpoint := "/api/v1/vagrant/applications"
	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var app App
	if err := decodeJSON(response, &app); err != nil {
		return nil, err
	}

	return &app, nil
}

// appVersion represents a specific version of an App in Atlas. It is actually
// an upload container/wrapper.
type appVersion struct {
	UploadPath string `json:"upload_path"`
	Token      string `json:"token"`
	Version    uint64 `json:"version"`
}

// appMetadataWrapper is a wrapper around a map that prefixes the JSON key with
// "metadata" when marshalled to format requests to the API properly.
type appMetadataWrapper struct {
	Metadata map[string]interface{} `json:"metadata,omitempty"`
}

// UploadApp creates and uploads a new version for the App. If the server does not
// find the application, an error is returned. If the server does not accept the
// data, an error is returned.
//
// It is the responsibility of the caller to create a properly-formed data
// object; this method blindly passes along the contents of the io.Reader.
func (c *Client) UploadApp(app *App, metadata map[string]interface{},
	data io.Reader, size int64) (uint64, error) {

	log.Printf("[INFO] uploading application %s (%d bytes) with metadata %q",
		app.Slug(), size, metadata)

	endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s/versions",
		app.User, app.Name)

	// If metadata was given, setup the RequestOptions to pass in the metadata
	// with the request.
	var ro *RequestOptions
	if metadata != nil {
		// wrap the struct into the correct JSON format
		wrapper := struct {
			Application *appMetadataWrapper `json:"application"`
		}{
			&appMetadataWrapper{metadata},
		}
		m, err := json.Marshal(wrapper)
		if err != nil {
			return 0, err
		}

		// Create the request options.
		ro = &RequestOptions{
			Body:       bytes.NewReader(m),
			BodyLength: int64(len(m)),
		}
	}

	request, err := c.Request("POST", endpoint, ro)
	if err != nil {
		return 0, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return 0, err
	}

	var av appVersion
	if err := decodeJSON(response, &av); err != nil {
		return 0, err
	}

	if err := c.putFile(av.UploadPath, data, size); err != nil {
		return 0, err
	}

	return av.Version, nil
}
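Callers typically paired App with CreateApp in a get-or-create pattern (ErrNotFound comes from the client code further below) before streaming an archive as a new version. A sketch using the signatures shown above; the "acme"/"web" names are illustrative, and `a` is an *Archive from the removed archive package:

	app, err := client.App("acme", "web")
	if err == atlas.ErrNotFound {
		// First push: the application does not exist yet, so create it.
		app, err = client.CreateApp("acme", "web")
	}
	if err != nil {
		log.Fatal(err)
	}

	// UploadApp takes map[string]interface{}, so widen the archive's
	// map[string]string VCS metadata first.
	metadata := make(map[string]interface{}, len(a.Metadata))
	for k, v := range a.Metadata {
		metadata[k] = v
	}

	version, err := client.UploadApp(app, metadata, a, a.Size)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded version %d of %s", version, app.Slug())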
@ -1,248 +0,0 @@
package atlas

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/url"
)

// Artifact represents a single instance of an artifact.
type Artifact struct {
	// User and name are self-explanatory. Tag is the combination
	// of both into "username/name"
	User string `json:"username"`
	Name string `json:"name"`
	Tag  string `json:",omitempty"`
}

// ArtifactVersion represents a single version of an artifact.
type ArtifactVersion struct {
	User     string            `json:"username"`
	Name     string            `json:"name"`
	Tag      string            `json:",omitempty"`
	Type     string            `json:"artifact_type"`
	ID       string            `json:"id"`
	Version  int               `json:"version"`
	Metadata map[string]string `json:"metadata"`
	File     bool              `json:"file"`
	Slug     string            `json:"slug"`

	UploadPath  string `json:"upload_path"`
	UploadToken string `json:"upload_token"`
}

// ArtifactSearchOpts are the options used to search for an artifact.
type ArtifactSearchOpts struct {
	User string
	Name string
	Type string

	Build    string
	Version  string
	Metadata map[string]string
}

// UploadArtifactOpts are the options used to upload an artifact.
type UploadArtifactOpts struct {
	User      string
	Name      string
	Type      string
	ID        string
	File      io.Reader
	FileSize  int64
	Metadata  map[string]string
	BuildID   int
	CompileID int
}

// MarshalJSON converts the UploadArtifactOpts into a JSON struct.
func (o *UploadArtifactOpts) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"artifact_version": map[string]interface{}{
			"id":         o.ID,
			"file":       o.File != nil,
			"metadata":   o.Metadata,
			"build_id":   o.BuildID,
			"compile_id": o.CompileID,
		},
	})
}

// This is the value that should be used for metadata in ArtifactSearchOpts
// if you don't care what the value is.
const MetadataAnyValue = "943febbf-589f-401b-8f25-58f6d8786848"

// Artifact finds the Atlas artifact by the given name and returns it. Any
// errors that occur are returned, including ErrAuth and ErrNotFound special
// exceptions which the user may want to handle separately.
func (c *Client) Artifact(user, name string) (*Artifact, error) {
	endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s", user, name)
	request, err := c.Request("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var aw artifactWrapper
	if err := decodeJSON(response, &aw); err != nil {
		return nil, err
	}

	return aw.Artifact, nil
}

// ArtifactSearch searches Atlas for the given ArtifactSearchOpts and returns
// a slice of ArtifactVersions.
func (c *Client) ArtifactSearch(opts *ArtifactSearchOpts) ([]*ArtifactVersion, error) {
	log.Printf("[INFO] searching artifacts: %#v", opts)

	params := make(map[string]string)
	if opts.Version != "" {
		params["version"] = opts.Version
	}
	if opts.Build != "" {
		params["build"] = opts.Build
	}

	i := 1
	for k, v := range opts.Metadata {
		prefix := fmt.Sprintf("metadata.%d.", i)
		params[prefix+"key"] = k
		if v != MetadataAnyValue {
			params[prefix+"value"] = v
		}

		i++
	}

	endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/search",
		opts.User, opts.Name, opts.Type)
	request, err := c.Request("GET", endpoint, &RequestOptions{
		Params: params,
	})
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var w artifactSearchWrapper
	if err := decodeJSON(response, &w); err != nil {
		return nil, err
	}

	return w.Versions, nil
}

// CreateArtifact creates and returns a new Artifact in Atlas. Any errors that
// occur are returned.
func (c *Client) CreateArtifact(user, name string) (*Artifact, error) {
	log.Printf("[INFO] creating artifact: %s/%s", user, name)
	body, err := json.Marshal(&artifactWrapper{&Artifact{
		User: user,
		Name: name,
	}})
	if err != nil {
		return nil, err
	}

	endpoint := "/api/v1/artifacts"
	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var aw artifactWrapper
	if err := decodeJSON(response, &aw); err != nil {
		return nil, err
	}

	return aw.Artifact, nil
}

// ArtifactFileURL is a helper method for getting the URL for an ArtifactVersion
// from the Client.
func (c *Client) ArtifactFileURL(av *ArtifactVersion) (*url.URL, error) {
	if !av.File {
		return nil, nil
	}

	u := *c.URL
	u.Path = fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/file",
		av.User, av.Name, av.Type)
	return &u, nil
}

// UploadArtifact streams the upload of a file on disk using the given
// UploadArtifactOpts. Any errors that occur are returned.
func (c *Client) UploadArtifact(opts *UploadArtifactOpts) (*ArtifactVersion, error) {
	log.Printf("[INFO] uploading artifact: %s/%s (%s)", opts.User, opts.Name, opts.Type)

	endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s",
		opts.User, opts.Name, opts.Type)

	body, err := json.Marshal(opts)
	if err != nil {
		return nil, err
	}

	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var av ArtifactVersion
	if err := decodeJSON(response, &av); err != nil {
		return nil, err
	}

	if opts.File != nil {
		if err := c.putFile(av.UploadPath, opts.File, opts.FileSize); err != nil {
			return nil, err
		}
	}

	return &av, nil
}

type artifactWrapper struct {
	Artifact *Artifact `json:"artifact"`
}

type artifactSearchWrapper struct {
	Versions []*ArtifactVersion
}

type artifactVersionWrapper struct {
	Version *ArtifactVersion
}
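A sketch of how ArtifactSearch was queried, using MetadataAnyValue to require that a metadata key exists regardless of its value; the user, name, artifact type, and metadata keys are illustrative:

	versions, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{
		User: "acme",
		Name: "web",
		Type: "amazon.image",
		Metadata: map[string]string{
			"region":  "us-east-1",            // key and value must both match
			"version": atlas.MetadataAnyValue, // key must exist, any value
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range versions {
		log.Printf("%s v%d (%s)", v.Slug, v.Version, v.ID)
	}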
@ -1,88 +0,0 @@
package atlas

import (
	"fmt"
	"log"
	"net/url"
	"strings"
)

// Login accepts a username and password as string arguments. Both username and
// password must be non-empty values. Atlas does not permit
// passwordless authentication.
//
// If authentication is unsuccessful, an error is returned with the body of the
// error containing the server's response.
//
// If authentication is successful, this method sets the Token value on the
// Client and returns the Token as a string.
func (c *Client) Login(username, password string) (string, error) {
	log.Printf("[INFO] logging in user %s", username)

	if len(username) == 0 {
		return "", fmt.Errorf("client: missing username")
	}

	if len(password) == 0 {
		return "", fmt.Errorf("client: missing password")
	}

	// Build the request
	request, err := c.Request("POST", "/api/v1/authenticate", &RequestOptions{
		Body: strings.NewReader(url.Values{
			"user[login]":       []string{username},
			"user[password]":    []string{password},
			"user[description]": []string{"Created by the Atlas Go Client"},
		}.Encode()),
		Headers: map[string]string{
			"Content-Type": "application/x-www-form-urlencoded",
		},
	})
	if err != nil {
		return "", err
	}

	// Make the request
	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return "", err
	}

	// Decode the body
	var tResponse struct{ Token string }
	if err := decodeJSON(response, &tResponse); err != nil {
		return "", err
	}

	// Set the token
	log.Printf("[DEBUG] setting atlas token (%s)", maskString(tResponse.Token))
	c.Token = tResponse.Token

	// Return the token
	return c.Token, nil
}

// Verify verifies that authentication and communication with Atlas
// is properly functioning.
func (c *Client) Verify() error {
	log.Printf("[INFO] verifying authentication")

	request, err := c.Request("GET", "/api/v1/authenticate", nil)
	if err != nil {
		return err
	}

	_, err = checkResp(c.HTTPClient.Do(request))
	return err
}

// maskString masks all but the first few characters of a string for display
// output. This is useful for tokens so we can display them to the user without
// showing the full output.
func maskString(s string) string {
	if len(s) <= 3 {
		return "*** (masked)"
	}

	return s[0:3] + "*** (masked)"
}
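Token-less callers obtained a token interactively via Login and then sanity-checked it with Verify. A minimal sketch; the credentials are illustrative:

	client := atlas.DefaultClient()

	// Exchange a username/password for an API token; Login also stores
	// the token on the client for subsequent requests.
	if _, err := client.Login("user@example.com", "correct-horse-battery"); err != nil {
		log.Fatal(err)
	}

	// Confirm the stored token actually authenticates.
	if err := client.Verify(); err != nil {
		log.Fatal(err)
	}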
@ -1,193 +0,0 @@
package atlas

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
)

// bcWrapper is the API wrapper since the server wraps the resulting object.
type bcWrapper struct {
	BuildConfig *BuildConfig `json:"build_configuration"`
}

// BuildVar is a single key/value variable; Atlas expects a list of these.
type BuildVar struct {
	Key       string `json:"key"`
	Value     string `json:"value"`
	Sensitive bool   `json:"sensitive"`
}
type BuildVars []BuildVar

// BuildConfig represents a Packer build configuration.
type BuildConfig struct {
	// User is the namespace under which the build config lives
	User string `json:"username"`

	// Name is the actual name of the build config, unique in the scope
	// of the username.
	Name string `json:"name"`
}

// Slug returns the slug format for this BuildConfig (User/Name)
func (b *BuildConfig) Slug() string {
	return fmt.Sprintf("%s/%s", b.User, b.Name)
}

// BuildConfigVersion represents a single uploaded (or uploadable) version
// of a build configuration.
type BuildConfigVersion struct {
	// The fields below are the username/name combo to uniquely identify
	// a build config.
	User string `json:"username"`
	Name string `json:"name"`

	// Builds is the list of builds that this version supports.
	Builds []BuildConfigBuild
}

// Slug returns the slug format for this BuildConfigVersion (User/Name)
func (bv *BuildConfigVersion) Slug() string {
	return fmt.Sprintf("%s/%s", bv.User, bv.Name)
}

// BuildConfigBuild is a single build that is present in an uploaded
// build configuration.
type BuildConfigBuild struct {
	// Name is a unique name for this build
	Name string `json:"name"`

	// Type is the type of builder that this build needs to run on,
	// such as "amazon-ebs" or "qemu".
	Type string `json:"type"`

	// Artifact is true if this build results in one or more artifacts
	// being sent to Atlas
	Artifact bool `json:"artifact"`
}

// BuildConfig gets a single build configuration by user and name.
func (c *Client) BuildConfig(user, name string) (*BuildConfig, error) {
	log.Printf("[INFO] getting build configuration %s/%s", user, name)

	endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s", user, name)
	request, err := c.Request("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var bc BuildConfig
	if err := decodeJSON(response, &bc); err != nil {
		return nil, err
	}

	return &bc, nil
}

// CreateBuildConfig creates a new build configuration.
func (c *Client) CreateBuildConfig(user, name string) (*BuildConfig, error) {
	log.Printf("[INFO] creating build configuration %s/%s", user, name)

	endpoint := "/api/v1/packer/build-configurations"
	body, err := json.Marshal(&bcWrapper{
		BuildConfig: &BuildConfig{
			User: user,
			Name: name,
		},
	})
	if err != nil {
		return nil, err
	}

	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return nil, err
	}

	var bc BuildConfig
	if err := decodeJSON(response, &bc); err != nil {
		return nil, err
	}

	return &bc, nil
}

// UploadBuildConfigVersion creates a single build configuration version
// and uploads the template associated with it.
//
// Actual API: "Create Build Config Version"
func (c *Client) UploadBuildConfigVersion(v *BuildConfigVersion, metadata map[string]interface{},
	vars BuildVars, data io.Reader, size int64) error {

	log.Printf("[INFO] uploading build configuration version %s (%d bytes), with metadata %q",
		v.Slug(), size, metadata)

	endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s/versions",
		v.User, v.Name)

	var bodyData bcCreateWrapper
	bodyData.Version.Builds = v.Builds
	bodyData.Version.Metadata = metadata
	bodyData.Version.Vars = vars
	body, err := json.Marshal(bodyData)
	if err != nil {
		return err
	}

	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return err
	}

	var bv bcCreate
	if err := decodeJSON(response, &bv); err != nil {
		return err
	}

	if err := c.putFile(bv.UploadPath, data, size); err != nil {
		return err
	}

	return nil
}

// bcCreate is the struct returned when creating a build configuration version.
type bcCreate struct {
	UploadPath string `json:"upload_path"`
}

// bcCreateWrapper is the wrapper for creating a build config version.
type bcCreateWrapper struct {
	Version struct {
		Metadata map[string]interface{} `json:"metadata,omitempty"`
		Builds   []BuildConfigBuild     `json:"builds"`
		Vars     BuildVars              `json:"packer_vars,omitempty"`
	} `json:"version"`
}
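This is the call the removed `packer push` command was built on. A sketch of uploading one version of a build configuration using the signatures above; the user/name, build list, vars, and the `data`/`size` archive reader are illustrative:

	v := &atlas.BuildConfigVersion{
		User: "acme",
		Name: "web",
		Builds: []atlas.BuildConfigBuild{
			{Name: "amazon-ebs", Type: "amazon-ebs", Artifact: true},
		},
	}
	vars := atlas.BuildVars{
		{Key: "aws_region", Value: "us-east-1", Sensitive: false},
	}

	// data is the gzipped template archive; size is its content length
	// (e.g. the Size of an Archive from the archive package above).
	if err := client.UploadBuildConfigVersion(v, nil, vars, data, size); err != nil {
		log.Fatal(err)
	}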
@ -1,339 +0,0 @@
|
|||
package atlas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-rootcerts"
|
||||
)
|
||||
|
||||
const (
|
||||
// atlasDefaultEndpoint is the default base URL for connecting to Atlas.
|
||||
atlasDefaultEndpoint = "https://atlas.hashicorp.com"
|
||||
|
||||
// atlasEndpointEnvVar is the environment variable that overrrides the
|
||||
// default Atlas address.
|
||||
atlasEndpointEnvVar = "ATLAS_ADDRESS"
|
||||
|
||||
// atlasCAFileEnvVar is the environment variable that causes the client to
|
||||
// load trusted certs from a file
|
||||
atlasCAFileEnvVar = "ATLAS_CAFILE"
|
||||
|
||||
// atlasCAPathEnvVar is the environment variable that causes the client to
|
||||
// load trusted certs from a directory
|
||||
atlasCAPathEnvVar = "ATLAS_CAPATH"
|
||||
|
||||
// atlasTLSNoVerifyEnvVar disables TLS verification, similar to curl -k
|
||||
// This defaults to false (verify) and will change to true (skip
|
||||
// verification) with any non-empty value
|
||||
atlasTLSNoVerifyEnvVar = "ATLAS_TLS_NOVERIFY"
|
||||
|
||||
// atlasTokenHeader is the header key used for authenticating with Atlas
|
||||
atlasTokenHeader = "X-Atlas-Token"
|
||||
)
|
||||
|
||||
var projectURL = "https://github.com/hashicorp/atlas-go"
|
||||
var userAgent = fmt.Sprintf("AtlasGo/1.0 (+%s; %s)",
|
||||
projectURL, runtime.Version())
|
||||
|
||||
// ErrAuth is the error returned if a 401 is returned by an API request.
|
||||
var ErrAuth = fmt.Errorf("authentication failed")
|
||||
|
||||
// ErrNotFound is the error returned if a 404 is returned by an API request.
|
||||
var ErrNotFound = fmt.Errorf("resource not found")
|
||||
|
||||
// RailsError represents an error that was returned from the Rails server.
|
||||
type RailsError struct {
|
||||
Errors []string `json:"errors"`
|
||||
}
|
||||
|
||||
// Error collects all of the errors in the RailsError and returns a comma-
|
||||
// separated list of the errors that were returned from the server.
|
||||
func (re *RailsError) Error() string {
|
||||
return strings.Join(re.Errors, ", ")
|
||||
}
|
||||
|
||||
// Client represents a single connection to a Atlas API endpoint.
|
||||
type Client struct {
|
||||
// URL is the full endpoint address to the Atlas server including the
|
||||
// protocol, port, and path.
|
||||
URL *url.URL
|
||||
|
||||
// Token is the Atlas authentication token
|
||||
Token string
|
||||
|
||||
// HTTPClient is the underlying http client with which to make requests.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// DefaultHeaders is a set of headers that will be added to every request.
|
||||
// This minimally includes the atlas user-agent string.
|
||||
DefaultHeader http.Header
|
||||
}
|
||||
|
||||
// DefaultClient returns a client that connects to the Atlas API.
|
||||
func DefaultClient() *Client {
|
||||
atlasEndpoint := os.Getenv(atlasEndpointEnvVar)
|
||||
if atlasEndpoint == "" {
|
||||
atlasEndpoint = atlasDefaultEndpoint
|
||||
}
|
||||
|
||||
client, err := NewClient(atlasEndpoint)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// NewClient creates a new Atlas Client from the given URL (as a string). If
|
||||
// the URL cannot be parsed, an error is returned. The HTTPClient is set to
|
||||
// an empty http.Client, but this can be changed programmatically by setting
|
||||
// client.HTTPClient. The user can also programmatically set the URL as a
|
||||
// *url.URL.
|
||||
func NewClient(urlString string) (*Client, error) {
|
||||
if len(urlString) == 0 {
|
||||
return nil, fmt.Errorf("client: missing url")
|
||||
}
|
||||
|
||||
parsedURL, err := url.Parse(urlString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
token := os.Getenv("ATLAS_TOKEN")
|
||||
if token != "" {
|
||||
log.Printf("[DEBUG] using ATLAS_TOKEN (%s)", maskString(token))
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
URL: parsedURL,
|
||||
Token: token,
|
||||
DefaultHeader: make(http.Header),
|
||||
}
|
||||
|
||||
client.DefaultHeader.Set("User-Agent", userAgent)
|
||||
|
||||
if err := client.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// init() sets defaults on the client.
|
||||
func (c *Client) init() error {
|
||||
c.HTTPClient = cleanhttp.DefaultClient()
|
||||
tlsConfig := &tls.Config{}
|
||||
if os.Getenv(atlasTLSNoVerifyEnvVar) != "" {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
}
|
||||
err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
|
||||
CAFile: os.Getenv(atlasCAFileEnvVar),
|
||||
CAPath: os.Getenv(atlasCAPathEnvVar),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t := cleanhttp.DefaultTransport()
|
||||
t.TLSClientConfig = tlsConfig
|
||||
c.HTTPClient.Transport = t
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestOptions is the list of options to pass to the request.
|
||||
type RequestOptions struct {
|
||||
// Params is a map of key-value pairs that will be added to the Request.
|
||||
Params map[string]string
|
||||
|
||||
// Headers is a map of key-value pairs that will be added to the Request.
|
||||
Headers map[string]string
|
||||
|
||||
// Body is an io.Reader object that will be streamed or uploaded with the
|
||||
// Request. BodyLength is the final size of the Body.
|
||||
Body io.Reader
|
||||
BodyLength int64
|
||||
}
|
||||
|
||||
// Request creates a new HTTP request using the given verb and sub path.
|
||||
func (c *Client) Request(verb, spath string, ro *RequestOptions) (*http.Request, error) {
|
||||
log.Printf("[INFO] request: %s %s", verb, spath)
|
||||
|
||||
// Ensure we have a RequestOptions struct (passing nil is an acceptable)
|
||||
if ro == nil {
|
||||
ro = new(RequestOptions)
|
||||
}
|
||||
|
||||
// Create a new URL with the appended path
|
||||
u := *c.URL
|
||||
u.Path = path.Join(c.URL.Path, spath)
|
||||
|
||||
// Add the token and other params
|
||||
if c.Token != "" {
|
||||
log.Printf("[DEBUG] request: appending token (%s)", maskString(c.Token))
|
||||
if ro.Headers == nil {
|
||||
ro.Headers = make(map[string]string)
|
||||
}
|
||||
|
||||
ro.Headers[atlasTokenHeader] = c.Token
|
||||
}
|
||||
|
||||
return c.rawRequest(verb, &u, ro)
|
||||
}
func (c *Client) putFile(rawURL string, r io.Reader, size int64) error {
	log.Printf("[INFO] putting file: %s", rawURL)

	url, err := url.Parse(rawURL)
	if err != nil {
		return err
	}

	request, err := c.rawRequest("PUT", url, &RequestOptions{
		Body:       r,
		BodyLength: size,
	})
	if err != nil {
		return err
	}

	if _, err := checkResp(c.HTTPClient.Do(request)); err != nil {
		return err
	}

	return nil
}

// rawRequest accepts a verb, URL, and RequestOptions struct and returns the
// constructed http.Request and any errors that occurred.
func (c *Client) rawRequest(verb string, u *url.URL, ro *RequestOptions) (*http.Request, error) {
	if verb == "" {
		return nil, fmt.Errorf("client: missing verb")
	}

	if u == nil {
		return nil, fmt.Errorf("client: missing URL.url")
	}

	if ro == nil {
		return nil, fmt.Errorf("client: missing RequestOptions")
	}

	// Add the token and other params
	var params = make(url.Values)
	for k, v := range ro.Params {
		params.Add(k, v)
	}
	u.RawQuery = params.Encode()

	// Create the request object
	request, err := http.NewRequest(verb, u.String(), ro.Body)
	if err != nil {
		return nil, err
	}

	// Set our default headers first
	for k, v := range c.DefaultHeader {
		request.Header[k] = v
	}

	// Add any request headers (auth will be here if set)
	for k, v := range ro.Headers {
		request.Header.Add(k, v)
	}

	// Add content-length if we have it
	if ro.BodyLength > 0 {
		request.ContentLength = ro.BodyLength
	}

	log.Printf("[DEBUG] raw request: %#v", request)

	return request, nil
}

// checkResp wraps http.Client.Do() and verifies that the request was
// successful. A non-200 response returns an error formatted to include any
// validation problems or otherwise.
func checkResp(resp *http.Response, err error) (*http.Response, error) {
	// If the err is already there, there was an error higher up the chain, so
	// just return that.
	if err != nil {
		return resp, err
	}

	log.Printf("[INFO] response: %d (%s)", resp.StatusCode, resp.Status)
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, resp.Body); err != nil {
		log.Printf("[ERR] response: error copying response body")
	} else {
		log.Printf("[DEBUG] response: %s", buf.String())

		// We are going to reset the response body, so we need to close the old
		// one or else it will leak.
		resp.Body.Close()
		resp.Body = &bytesReadCloser{&buf}
	}

	switch resp.StatusCode {
	case 200, 201, 202, 204:
		return resp, nil
	case 400:
		return nil, parseErr(resp)
	case 401:
		return nil, ErrAuth
	case 404:
		return nil, ErrNotFound
	case 422:
		return nil, parseErr(resp)
	default:
		return nil, fmt.Errorf("client: %s", resp.Status)
	}
}
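
To make the status mapping concrete, here is a test-style sketch (not part of the original test suite) that would live in the same package; it assumes `net/http`, `io/ioutil`, `strings`, and `testing` are imported:

``` go
func TestCheckResp_notFound(t *testing.T) {
	// checkResp drains and replaces the body so callers can still read it,
	// then maps the 404 status to the package-level ErrNotFound.
	resp := &http.Response{
		StatusCode: 404,
		Status:     "404 Not Found",
		Body:       ioutil.NopCloser(strings.NewReader(`{"errors": []}`)),
	}

	if _, err := checkResp(resp, nil); err != ErrNotFound {
		t.Fatalf("expected ErrNotFound, got: %v", err)
	}
}
```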
// parseErr is used to take an error JSON response and return a single string
// for use in error messages.
func parseErr(r *http.Response) error {
	re := &RailsError{}

	if err := decodeJSON(r, &re); err != nil {
		return fmt.Errorf("error decoding JSON body: %s", err)
	}

	return re
}

// decodeJSON is used to JSON decode a body into an interface.
func decodeJSON(resp *http.Response, out interface{}) error {
	defer resp.Body.Close()
	dec := json.NewDecoder(resp.Body)
	return dec.Decode(out)
}

// bytesReadCloser is a simple wrapper around a bytes buffer that implements
// Close as a noop.
type bytesReadCloser struct {
	*bytes.Buffer
}

func (nrc *bytesReadCloser) Close() error {
	// Nothing to do here: the buffer is just some data in memory, so Close is
	// a no-op.
	return nil
}
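
Taken together, the pieces above compose into the package's usual call pattern. A hypothetical package-internal helper (not in the original file) makes the flow explicit:

``` go
// getJSON is a sketch of the idiom used throughout this client: build the
// request, execute it, normalize the response, and decode the JSON body.
func (c *Client) getJSON(spath string, out interface{}) error {
	request, err := c.Request("GET", spath, nil)
	if err != nil {
		return err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return err
	}

	return decodeJSON(response, out)
}
```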
@ -1,106 +0,0 @@
package atlas

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
)

// TerraformConfigVersion represents a single uploaded version of a
// Terraform configuration.
type TerraformConfigVersion struct {
	Version   int
	Remotes   []string          `json:"remotes"`
	Metadata  map[string]string `json:"metadata"`
	Variables map[string]string `json:"variables,omitempty"`
	TFVars    []TFVar           `json:"tf_vars"`
}

// TFVar is used to serialize a single Terraform variable sent by the
// manager as a collection of Variables in a Job payload.
type TFVar struct {
	Key   string `json:"key"`
	Value string `json:"value"`
	IsHCL bool   `json:"hcl"`
}
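
The struct tags above fix the wire format; note in particular that `IsHCL` serializes under the key `hcl`. A standalone sketch (the type is copied here so the snippet compiles on its own):

``` go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// TFVar mirrors the definition above.
type TFVar struct {
	Key   string `json:"key"`
	Value string `json:"value"`
	IsHCL bool   `json:"hcl"`
}

func main() {
	b, err := json.Marshal(TFVar{Key: "region", Value: "us-east-1"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"key":"region","value":"us-east-1","hcl":false}
}
```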
// TerraformConfigLatest returns the latest Terraform configuration version.
func (c *Client) TerraformConfigLatest(user, name string) (*TerraformConfigVersion, error) {
	log.Printf("[INFO] getting terraform configuration %s/%s", user, name)

	endpoint := fmt.Sprintf("/api/v1/terraform/configurations/%s/%s/versions/latest", user, name)
	request, err := c.Request("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err == ErrNotFound {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	var wrapper tfConfigVersionWrapper
	if err := decodeJSON(response, &wrapper); err != nil {
		return nil, err
	}

	return wrapper.Version, nil
}

// CreateTerraformConfigVersion creates a new Terraform configuration
// version and uploads a slug with it.
func (c *Client) CreateTerraformConfigVersion(
	user string, name string,
	version *TerraformConfigVersion,
	data io.Reader, size int64) (int, error) {
	log.Printf("[INFO] creating terraform configuration %s/%s", user, name)

	endpoint := fmt.Sprintf(
		"/api/v1/terraform/configurations/%s/%s/versions", user, name)
	body, err := json.Marshal(&tfConfigVersionWrapper{
		Version: version,
	})
	if err != nil {
		return 0, err
	}

	request, err := c.Request("POST", endpoint, &RequestOptions{
		Body: bytes.NewReader(body),
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
	})
	if err != nil {
		return 0, err
	}

	response, err := checkResp(c.HTTPClient.Do(request))
	if err != nil {
		return 0, err
	}

	var result tfConfigVersionCreate
	if err := decodeJSON(response, &result); err != nil {
		return 0, err
	}

	if err := c.putFile(result.UploadPath, data, size); err != nil {
		return 0, err
	}

	return result.Version, nil
}

type tfConfigVersionCreate struct {
	UploadPath string `json:"upload_path"`
	Version    int
}

type tfConfigVersionWrapper struct {
	Version *TerraformConfigVersion `json:"version"`
}
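
A hedged usage sketch for the two methods above; the URL, organization, and slug bytes are placeholders, and the import path assumes the vendored package being deleted here:

``` go
package main

import (
	"bytes"
	"log"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client, err := atlas.NewClient("https://atlas.example.com")
	if err != nil {
		log.Fatal(err)
	}

	// TerraformConfigLatest returns (nil, nil) when no version exists yet.
	latest, err := client.TerraformConfigLatest("hashicorp", "example")
	if err != nil {
		log.Fatal(err)
	}
	if latest != nil {
		log.Printf("latest version: %d", latest.Version)
	}

	// Create a new version and upload a (placeholder) slug archive with it.
	slug := []byte("tar.gz bytes go here")
	version, err := client.CreateTerraformConfigVersion(
		"hashicorp", "example",
		&atlas.TerraformConfigVersion{
			Metadata: map[string]string{"source": "example"},
		},
		bytes.NewReader(slug), int64(len(slug)),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created version: %d", version)
}
```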
@ -1,22 +0,0 @@
package atlas

import (
	"fmt"
	"strings"
)

// ParseSlug parses a slug of the format (x/y) into the x and y components. It
// accepts a string of the format "x/y" ("user/name" for example). If an empty
// string is given, an error is returned. If the given string is not a valid
// slug format, an error is returned.
func ParseSlug(slug string) (string, string, error) {
	if slug == "" {
		return "", "", fmt.Errorf("missing slug")
	}

	parts := strings.Split(slug, "/")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("malformed slug %q", slug)
	}
	return parts[0], parts[1], nil
}
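
Illustrative behavior (a sketch, assuming the package context above):

``` go
user, name, err := ParseSlug("hashicorp/precise64")
// user == "hashicorp", name == "precise64", err == nil

_, _, err = ParseSlug("missing-separator")
// err != nil: malformed slug "missing-separator"
```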
@ -827,20 +827,6 @@
"revision": "a91eba7f97777409bc2c443f5534d41dd20c5720",
|
||||
"revisionTime": "2017-03-19T17:27:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "izBSRxLAHN+a/XpAku0in05UzlY=",
|
||||
"comment": "20141209094003-92-g95fa852",
|
||||
"path": "github.com/hashicorp/atlas-go/archive",
|
||||
"revision": "17522f63497eefcffc90d528ca1eeaded2b529d3",
|
||||
"revisionTime": "2017-08-08T16:18:53Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "IR7S+SOsSUnPnLxgRrfemXfCqNM=",
|
||||
"comment": "20141209094003-92-g95fa852",
|
||||
"path": "github.com/hashicorp/atlas-go/v1",
|
||||
"revision": "0885342d5643b7a412026596f2f3ebb3c9b4c190",
|
||||
"revisionTime": "2017-06-08T19:44:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=",
|
||||
"path": "github.com/hashicorp/errwrap",
@ -34,7 +34,6 @@ still distributed with Packer.
- Amazon Import
- Artifice
- Atlas
- Docker
- Local Shell
- Manifest
@ -117,7 +117,7 @@ For example, assume a tab is typed at the end of each prompt line:
```
$ packer p
plugin push
$ packer push -
-name -sensitive -token -var -var-file
plugin build
$ packer build -
-color -debug -except -force -machine-readable -on-error -only -parallel -var -var-file
```
@ -1,114 +0,0 @@
---
description: |
    The `packer push` command uploads a template and other required files to the
    Atlas build service, which will run your packer build for you.
layout: docs
page_title: 'packer push - Commands'
sidebar_current: 'docs-commands-push'
---

# `push` Command

!> The Packer and Artifact Registry features of Atlas will no longer be
actively developed or maintained and will be fully decommissioned.
Please see our [guide on building immutable infrastructure with
Packer on CI/CD](/guides/packer-on-cicd/) for ideas on implementing these
features yourself.

The `packer push` command uploads a template and other required files to the
Atlas service, which will run your packer build for you. [Learn more about
Packer in Atlas.](https://atlas.hashicorp.com/help/packer/features)

Running builds remotely makes it easier to iterate on packer builds that are
not supported on your operating system, for example, building Docker or QEMU
images while developing on Mac or Windows. Also, the hard work of building VMs
is offloaded to dedicated servers with more CPU, memory, and network resources.

When you use push to run a build in Atlas, you may also want to store your
build artifacts in Atlas. In order to do that you will also need to configure
the [Atlas post-processor](/docs/post-processors/atlas.html). This is optional;
the post-processor and the push command can be used independently.

~> The push command uploads your template and other files, like provisioning
scripts, to Atlas. Take care not to upload files that you don't intend to, like
secrets or large binaries. **If you have secrets in your Packer template, you
should [move them into environment
variables](https://www.packer.io/docs/templates/user-variables.html).**

Most push behavior is [configured in your packer
template](/docs/templates/push.html). You can override or supplement your
configuration using the options below.

## Options

- `-token` - Your access token for the Atlas API. Log in to Atlas to
  [generate an Atlas token](https://atlas.hashicorp.com/settings/tokens). The
  most convenient way to configure your token is to set it in the
  `ATLAS_TOKEN` environment variable, but you can also use `-token` on the
  command line.

- `-name` - The name of the build in the service. This typically looks like
  `hashicorp/precise64`, which follows the form `<username>/<buildname>`. This
  must be specified here or in your template.

- `-sensitive` - A comma-separated list of variables that should be marked as
  sensitive in the Terraform Enterprise UI. These variables' keys will be
  visible, but their values will be redacted. Example usage:
  `-var 'supersecretpassword=mypassword' -sensitive=supersecretpassword`

- `-var` - Set a variable in your packer template. This option can be used
  multiple times. This is useful for setting version numbers for your build.

- `-var-file` - Set template variables from a file.

## Environment Variables

- `ATLAS_CAFILE` (path) - This should be a path to an X.509 PEM-encoded public
  key. If specified, this will be used to validate the certificate authority
  that signed certificates used by an Atlas installation.

- `ATLAS_CAPATH` - This should be a path which contains an X.509 PEM-encoded
  public key file. If specified, this will be used to validate the certificate
  authority that signed certificates used by an Atlas installation.

## Examples

Push a Packer template:

``` shell
$ packer push template.json
```

Push a Packer template with a custom token:

``` shell
$ packer push -token ABCD1234 template.json
```

## Limits

`push` is limited to a 5 GB upload when pushing to Atlas. To be clear, Packer
*can* build artifacts larger than 5 GB, and Atlas *can* store artifacts larger
than 5 GB. However, the initial payload you push to *start* the build cannot
exceed 5 GB. If your boot ISO is larger than 5 GB (for example, if you are
building OSX images), you will need to put your boot ISO in an external web
service and download it during the packer run.

## Building Private `.iso` and `.dmg` Files

If you want to build a private `.iso` file, you can upload the `.iso` to a
secure file hosting service like [Amazon
S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html),
[Google Cloud
Storage](https://cloud.google.com/storage/docs/gsutil/commands/signurl), or
[Azure File
Service](https://msdn.microsoft.com/en-us/library/azure/dn194274.aspx) and
download it at build time using a signed URL. You should convert `.dmg` files
to `.iso` and follow a similar procedure.
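
Packer does not generate these URLs for you, but as one sketch of the idea, a
pre-signed S3 URL can be created with the AWS SDK for Go; the bucket, key, and
region below are placeholders, not values from this repository:

``` go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	}))

	// Build (but do not send) a GetObject request, then presign it.
	req, _ := s3.New(sess).GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("my-private-isos"), // hypothetical bucket
		Key:    aws.String("ubuntu-16.04.iso"),
	})

	signedURL, err := req.Presign(15 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	// The signed URL can then be passed to your template, for example:
	//   packer push -var "iso_url=${SIGNED_URL}" template.json
	log.Println(signedURL)
}
```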

Once you have added [variables in your packer
template](/docs/templates/user-variables.html), you can specify credentials or
signed URLs using Atlas environment variables, or via the `-var` flag when you
run `push`.

![Configure your signed URL in the Atlas build variables
menu](/assets/images/packer-signed-urls.png)
@ -25,8 +25,7 @@ After overriding the artifact with artifice, you can use it with other
post-processors like
[compress](https://www.packer.io/docs/post-processors/compress.html),
[docker-push](https://www.packer.io/docs/post-processors/docker-push.html),
[Atlas](https://www.packer.io/docs/post-processors/atlas.html), or a third-party
post-processor.
or a third-party post-processor.

Artifice allows you to use the familiar packer workflow to create a fresh,
stateless build environment for each build on the infrastructure of your
@ -42,7 +41,7 @@ Artifice helps you tie together a few other packer features:
- A file provisioner, which downloads the artifact from the VM
- The artifice post-processor, which identifies which files have been
  downloaded from the VM
- Additional post-processors, which push the artifact to Atlas, Docker
- Additional post-processors, which push the artifact to Docker
  hub, etc.

You will want to perform as much work as possible inside the VM. Ideally the
@ -68,7 +67,7 @@ This minimal example:
2. Installs a [consul](https://www.consul.io/) release
3. Downloads the consul binary
4. Packages it into a `.tar.gz` file
5. Uploads it to Atlas.
5. Uploads it to S3.

VMX is a fast way to build and test locally, but you can easily substitute
another builder.
@ -113,9 +112,8 @@ another builder.
"output": "consul-0.5.2.tar.gz"
|
||||
},
|
||||
{
|
||||
"type":"atlas",
|
||||
"artifact": "hashicorp/consul",
|
||||
"artifact_type": "archive"
|
||||
"type": "shell-local",
|
||||
"inline": [ "/usr/local/bin/aws s3 cp consul-0.5.2.tar.gz s3://<s3 path>" ]
|
||||
}
|
||||
]
|
||||
]
@ -137,7 +135,7 @@ artifact (the vmx file in this case) and it will not have the desired result.
"files": ["consul"]
|
||||
},
|
||||
{
|
||||
"type": "atlas",
|
||||
"type": "compress",
|
||||
...
|
||||
}
|
||||
], // <--- End post-processor chain
@ -1,147 +0,0 @@
---
description: |
    The Atlas post-processor for Packer receives an artifact from a Packer build
    and uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to
    version and distribute them in a simple way.
layout: docs
page_title: 'Atlas - Post-Processor'
sidebar_current: 'docs-post-processors-atlas'
---

# Atlas Post-Processor

!> The Packer and Artifact Registry features of Atlas will no longer be
actively developed or maintained and will be fully decommissioned.
Please see our [guide on building immutable infrastructure with
Packer on CI/CD](/guides/packer-on-cicd/) for ideas on implementing these
features yourself.

Type: `atlas`

The Atlas post-processor uploads artifacts from your packer builds to Atlas
for hosting. Artifacts hosted in Atlas are automatically made available for
use with Terraform, and Atlas provides additional features for managing
versions and releases. [Learn more about packer in
Atlas.](https://atlas.hashicorp.com/help/packer/features)

You can also use the push command to [run packer builds in
Atlas](/docs/commands/push.html). The push command and the Atlas
post-processor can be used together or independently.

~> If you'd like to publish a Vagrant box to [Vagrant Cloud](https://vagrantcloud.com), you must use the [`vagrant-cloud`](/docs/post-processors/vagrant-cloud.html) post-processor.

## Workflow

To take full advantage of Packer and Atlas, it's important to understand the
workflow for creating artifacts with Packer and storing them in Atlas using
this post-processor. The goal of the Atlas post-processor is to streamline the
distribution of public or private artifacts by hosting them in a central
location in Atlas.

Here is an example workflow:

1. Packer builds an AMI with the [Amazon AMI
   builder](/docs/builders/amazon.html)
2. The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
   The `atlas` post-processor is configured with the name of the AMI, for
   example `hashicorp/foobar`, to create the artifact in Atlas or update the
   version if the artifact already exists
3. The new version is ready and available to be used in deployments with a
   tool like [Terraform](https://www.terraform.io)

## Configuration

The configuration allows you to specify and access the artifact in Atlas.

### Required:

- `artifact` (string) - The shorthand tag for your artifact that maps to
  Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`.
  You must have access to the organization (hashicorp in this example) in
  order to add an artifact to the organization in Atlas.

- `artifact_type` (string) - For uploading artifacts to Atlas.
  `artifact_type` can be set to any unique identifier; however, the following
  are recommended for consistency: `amazon.image`, `azure.image`,
  `cloudstack.image`, `digitalocean.image`, `docker.image`,
  `googlecompute.image`, `hyperv.image`, `oneandone.image`,
  `openstack.image`, `parallels.image`, `profitbricks.image`, `qemu.image`,
  `triton.image`, `virtualbox.image`, `vmware.image`, and `custom.image`.

### Optional:

- `token` (string) - Your access token for the Atlas API.

-> Log in to Atlas to [generate an Atlas
token](https://atlas.hashicorp.com/settings/tokens). The most convenient way to
configure your token is to set it in the `ATLAS_TOKEN` environment variable,
but you can also use the `token` configuration option.

- `atlas_url` (string) - Override the base URL for Atlas. This is useful if
  you're using Atlas Enterprise in your own network. Defaults to
  `https://atlas.hashicorp.com/api/v1`.

- `metadata` (map) - Send metadata about the artifact.

- `description` (string) - Inside the metadata blob you can add information
  about the uploaded artifact to Atlas. This will be reflected in the box
  description on Atlas.

- `provider` (string) - Used by Atlas to help determine what should be used
  to run the artifact.

- `version` (string) - Used by Atlas to give a semantic version to the
  uploaded artifact.

## Environment Variables

- `ATLAS_CAFILE` (path) - This should be a path to an X.509 PEM-encoded public key. If specified, this will be used to validate the certificate authority that signed certificates used by an Atlas installation.

- `ATLAS_CAPATH` - This should be a path which contains an X.509 PEM-encoded public key file. If specified, this will be used to validate the certificate authority that signed certificates used by an Atlas installation.

### Example Configuration

``` json
{
  "variables": {
    "aws_access_key": "ACCESS_KEY_HERE",
    "aws_secret_key": "SECRET_KEY_HERE",
    "atlas_token": "ATLAS_TOKEN_HERE"
  },
  "builders": [
    {
      "type": "amazon-ebs",
      "access_key": "{{user `aws_access_key`}}",
      "secret_key": "{{user `aws_secret_key`}}",
      "region": "us-east-1",
      "source_ami": "ami-fce3c696",
      "instance_type": "t2.micro",
      "ssh_username": "ubuntu",
      "ami_name": "atlas-example {{timestamp}}"
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "inline": [
        "sleep 30",
        "sudo apt-get update",
        "sudo apt-get install apache2 -y"
      ]
    }
  ],
  "post-processors": [
    {
      "type": "atlas",
      "token": "{{user `atlas_token`}}",
      "artifact": "hashicorp/foobar",
      "artifact_type": "amazon.image",
      "metadata": {
        "created_at": "{{timestamp}}"
      }
    }
  ]
}
```

More information on the correct configuration of the `amazon-ebs` builder in this example can be found in the [amazon-ebs builder documentation](/docs/builders/amazon-ebs.html).
@ -25,8 +25,7 @@ post-processors like
[artifice](https://www.packer.io/docs/post-processors/artifice.html),
[compress](https://www.packer.io/docs/post-processors/compress.html),
[docker-push](https://www.packer.io/docs/post-processors/docker-push.html),
[atlas](https://www.packer.io/docs/post-processors/atlas.html), or a third-party
post-processor.
or a third-party post-processor.

## Basic example
@ -15,8 +15,9 @@ Type: `vagrant-cloud`
The Packer Vagrant Cloud post-processor receives a Vagrant box from the
`vagrant` post-processor and pushes it to Vagrant Cloud. [Vagrant
Cloud](https://atlas.hashicorp.com) hosts and serves boxes to Vagrant, allowing you
to version and distribute boxes to an organization in a simple way.
Cloud](https://app.vagrantup.com/boxes/search) hosts and serves boxes to
Vagrant, allowing you to version and distribute boxes to an organization in
a simple way.

You'll need to be familiar with Vagrant Cloud, have an upgraded account to
enable box hosting, and be distributing your box via the [shorthand
@ -98,35 +98,6 @@ It is very important that any post processors that need to be run in order, be s
As you may be able to imagine, the **simple** and **detailed** definitions are
simply shortcuts for a **sequence** definition of only one element.

## Creating Vagrant Boxes in Atlas

It is important to sequence post-processors when creating and uploading Vagrant
boxes to Atlas via Packer. Using a sequence ensures that the post-processors
are run in order and that the Vagrant box is created before it is uploaded to
Atlas.

``` json
{
  "post-processors": [
    [
      {
        "type": "vagrant",
        "keep_input_artifact": false
      },
      {
        "type": "atlas",
        "only": ["virtualbox-iso"],
        "artifact": "dundlermifflin/dwight-schrute",
        "artifact_type": "vagrant.box",
        "metadata": {
          "provider": "virtualbox",
          "version": "0.0.1"
        }
      }
    ]
  ]
}
```

More documentation on the Atlas post-processor can be found
[here](/docs/post-processors/atlas.html).

## Input Artifacts

When using post-processors, the input artifact (coming from a builder or another
@ -1,99 +0,0 @@
---
description: |
    Within the template, the push section configures how a template can be pushed
    to a remote build service.
layout: docs
page_title: 'Push - Templates'
sidebar_current: 'docs-templates-push'
---

# Template Push

!> The Packer and Artifact Registry features of Atlas will no longer be
actively developed or maintained and will be fully decommissioned.
Please see our [guide on building immutable infrastructure with
Packer on CI/CD](/guides/packer-on-cicd/) for ideas on implementing these
features yourself.

Within the template, the push section configures how a template can be
[pushed](/docs/commands/push.html) to a remote build service.

Push configuration is responsible for defining what files are required to
build this template, what the name of the build configuration is in the build
service, etc.

The only build service that Packer can currently push to is
[Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build
services will come in the form of plugins in the future.

Within a template, a push configuration section looks like this:

``` json
{
  "push": {
    // ... push configuration here
  }
}
```

## Configuration Reference

There are many configuration options available for the push section. They are
segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.

### Required

- `name` (string) - Name of the build configuration in the build service. If
  this doesn't exist, it will be created (by default). Note that the name
  cannot contain dots. `[a-zA-Z0-9-_/]+` are safe.

### Optional

- `address` (string) - The address of the build service to use. By default
  this is `https://atlas.hashicorp.com`.

- `base_dir` (string) - The base directory of the files to upload. This will
  be the current working directory when the build service executes
  your template. This path is relative to the template.

- `include` (array of strings) - Glob patterns to include relative to the
  `base_dir`. If this is specified, only files that match the include pattern
  are included.

- `exclude` (array of strings) - Glob patterns to exclude relative to the
  `base_dir`.

- `token` (string) - An access token to use to authenticate to the
  build service.

- `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and
  only upload the files that are tracked by the VCS. This is useful for
  automatically excluding ignored files. This defaults to false.

## Examples

A push configuration section with minimal options:

``` json
{
  "push": {
    "name": "hashicorp/precise64"
  }
}
```

A push configuration directing Packer to inspect the VCS and listing
individual files to include:

``` json
{
  "push": {
    "name": "hashicorp/precise64",
    "vcs": true,
    "include": [
      "other_file/outside_of.vcs"
    ]
  }
}
```
@ -83,7 +83,6 @@ Available commands are:
    build       build image(s) from template
    fix         fixes templates from old versions of packer
    inspect     see components of a template
    push        push template files to a Packer build service
    validate    check that a template is valid
    version     Prints the Packer version
```
@ -18,9 +18,6 @@
<li<%= sidebar_current("docs-commands-inspect") %>>
|
||||
<a href="/docs/commands/inspect.html"><tt>inspect</tt></a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-commands-push") %>>
|
||||
<a href="/docs/commands/push.html"><tt>push</tt></a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-commands-validate") %>>
|
||||
<a href="/docs/commands/validate.html"><tt>validate</tt></a>
|
||||
</li>
@ -45,9 +42,6 @@
<li<%= sidebar_current("docs-templates-provisioners") %>>
|
||||
<a href="/docs/templates/provisioners.html">Provisioners</a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-templates-push") %>>
|
||||
<a href="/docs/templates/push.html">Push</a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-templates-user-variables") %>>
|
||||
<a href="/docs/templates/user-variables.html">User Variables</a>
|
||||
</li>
@ -257,9 +251,6 @@
<li<%= sidebar_current("docs-post-processors-artifice") %>>
|
||||
<a href="/docs/post-processors/artifice.html">Artifice</a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-post-processors-atlas") %>>
|
||||
<a href="/docs/post-processors/atlas.html">Atlas</a>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-post-processors-compress") %>>
|
||||
<a href="/docs/post-processors/compress.html">Compress</a>
|
||||
</li>