Add DigitalOcean post-processor.
commit c0c5c6afac
parent 444fcd12c9
@@ -12,16 +12,16 @@ import (
 type Artifact struct {
     // The name of the snapshot
-    snapshotName string
+    SnapshotName string
 
     // The ID of the image
-    snapshotId int
+    SnapshotId int
 
     // The name of the region
-    regionNames []string
+    RegionNames []string
 
     // The client for making API calls
-    client *godo.Client
+    Client *godo.Client
 }
 
 func (*Artifact) BuilderId() string {
@@ -34,11 +34,11 @@ func (*Artifact) Files() []string {
 }
 
 func (a *Artifact) Id() string {
-    return fmt.Sprintf("%s:%s", strings.Join(a.regionNames[:], ","), strconv.FormatUint(uint64(a.snapshotId), 10))
+    return fmt.Sprintf("%s:%s", strings.Join(a.RegionNames[:], ","), strconv.FormatUint(uint64(a.SnapshotId), 10))
 }
 
 func (a *Artifact) String() string {
-    return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in regions '%v'", a.snapshotName, a.snapshotId, strings.Join(a.regionNames[:], ","))
+    return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in regions '%v'", a.SnapshotName, a.SnapshotId, strings.Join(a.RegionNames[:], ","))
 }
 
 func (a *Artifact) State(name string) interface{} {
@@ -46,7 +46,7 @@ func (a *Artifact) State(name string) interface{} {
 }
 
 func (a *Artifact) Destroy() error {
-    log.Printf("Destroying image: %d (%s)", a.snapshotId, a.snapshotName)
-    _, err := a.client.Images.Delete(context.TODO(), a.snapshotId)
+    log.Printf("Destroying image: %d (%s)", a.SnapshotId, a.SnapshotName)
+    _, err := a.Client.Images.Delete(context.TODO(), a.SnapshotId)
     return err
 }
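The hunks above export the builder's `Artifact` fields. The point of the rename is that code outside the `digitalocean` builder package, such as the new post-processor, can construct and return the same artifact type. A minimal sketch of what that enables, with a hypothetical token and IDs (not taken from this commit):

``` go
package main

import (
	"fmt"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/packer/builder/digitalocean"
	"golang.org/x/oauth2"
)

func main() {
	// Hypothetical token; the post-processor reads it from api_token.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

	// With the fields exported, another package can build the artifact directly.
	artifact := &digitalocean.Artifact{
		SnapshotName: "example-snapshot", // hypothetical values
		SnapshotId:   12345678,
		RegionNames:  []string{"nyc3", "sfo2"},
		Client:       client,
	}
	fmt.Println(artifact.String())
	fmt.Println(artifact.Id())
}
```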
@@ -113,10 +113,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
     }
 
     artifact := &Artifact{
-        snapshotName: state.Get("snapshot_name").(string),
-        snapshotId:   state.Get("snapshot_image_id").(int),
-        regionNames:  state.Get("regions").([]string),
-        client:       client,
+        SnapshotName: state.Get("snapshot_name").(string),
+        SnapshotId:   state.Get("snapshot_image_id").(int),
+        RegionNames:  state.Get("regions").([]string),
+        Client:       client,
     }
 
     return artifact, nil
@@ -91,7 +91,7 @@ func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multiste
         return multistep.ActionHalt
     }
     ui.Say(fmt.Sprintf("transferring Snapshot ID: %d", imageTransfer.ID))
-    if err := waitForImageState(godo.ActionCompleted, imageTransfer.ID, action.ID,
+    if err := WaitForImageState(godo.ActionCompleted, imageTransfer.ID, action.ID,
         client, 20*time.Minute); err != nil {
         // If we get an error the first time, actually report it
         err := fmt.Errorf("Error waiting for snapshot transfer: %s", err)
@@ -158,9 +158,9 @@ func waitForActionState(
     }
 }
 
-// waitForImageState simply blocks until the image action is in
+// WaitForImageState simply blocks until the image action is in
 // a state we expect, while eventually timing out.
-func waitForImageState(
+func WaitForImageState(
     desiredState string, imageId, actionId int,
     client *godo.Client, timeout time.Duration) error {
     done := make(chan struct{})
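Exporting `waitForImageState` as `WaitForImageState` lets the new post-processor reuse the builder's polling helper when it distributes an imported image to additional regions. A minimal sketch of such a call, with a hypothetical token and hypothetical image/action IDs:

``` go
package main

import (
	"log"
	"time"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/packer/builder/digitalocean"
	"golang.org/x/oauth2"
)

func main() {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"}) // hypothetical token
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

	// Hypothetical IDs; in the post-processor they come from ImageActions.Transfer.
	imageID, actionID := 12345678, 87654321

	// Block until the transfer action completes, or give up after 20 minutes.
	if err := digitalocean.WaitForImageState(godo.ActionCompleted, imageID, actionID,
		client, 20*time.Minute); err != nil {
		log.Fatalf("error waiting for image transfer: %s", err)
	}
}
```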
@@ -51,6 +51,7 @@ import (
     artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice"
     checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum"
     compresspostprocessor "github.com/hashicorp/packer/post-processor/compress"
+    digitaloceanimportpostprocessor "github.com/hashicorp/packer/post-processor/digitalocean-import"
     dockerimportpostprocessor "github.com/hashicorp/packer/post-processor/docker-import"
     dockerpushpostprocessor "github.com/hashicorp/packer/post-processor/docker-push"
     dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save"
@@ -142,6 +143,7 @@ var PostProcessors = map[string]packer.PostProcessor{
     "artifice":            new(artificepostprocessor.PostProcessor),
     "checksum":            new(checksumpostprocessor.PostProcessor),
     "compress":            new(compresspostprocessor.PostProcessor),
+    "digitalocean-import": new(digitaloceanimportpostprocessor.PostProcessor),
     "docker-import":       new(dockerimportpostprocessor.PostProcessor),
     "docker-push":         new(dockerpushpostprocessor.PostProcessor),
     "docker-save":         new(dockersavepostprocessor.PostProcessor),
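Registering the post-processor in the `PostProcessors` map is what ties the `digitalocean-import` type name used in templates to the new implementation. A minimal sketch of that lookup (illustrative only; the real plugin command wraps this map in its plugin-serving logic):

``` go
package main

import (
	"fmt"

	"github.com/hashicorp/packer/command"
)

func main() {
	// The template's post-processor "type" is the key into this map.
	pp, ok := command.PostProcessors["digitalocean-import"]
	fmt.Printf("registered: %v (%T)\n", ok, pp)
}
```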
@@ -0,0 +1,359 @@
+package digitaloceanimport
+
+import (
+    "context"
+    "fmt"
+    "golang.org/x/oauth2"
+    "log"
+    "os"
+    "strings"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "github.com/aws/aws-sdk-go/service/s3/s3manager"
+    "github.com/digitalocean/godo"
+
+    "github.com/hashicorp/packer/builder/digitalocean"
+    "github.com/hashicorp/packer/common"
+    "github.com/hashicorp/packer/helper/config"
+    "github.com/hashicorp/packer/packer"
+    "github.com/hashicorp/packer/template/interpolate"
+)
+
+const BuilderId = "packer.post-processor.digitalocean-import"
+
+type Config struct {
+    common.PackerConfig `mapstructure:",squash"`
+
+    APIToken     string `mapstructure:"api_token"`
+    SpacesKey    string `mapstructure:"spaces_key"`
+    SpacesSecret string `mapstructure:"spaces_secret"`
+
+    SpacesRegion string        `mapstructure:"spaces_region"`
+    SpaceName    string        `mapstructure:"space_name"`
+    ObjectName   string        `mapstructure:"space_object_name"`
+    SkipClean    bool          `mapstructure:"skip_clean"`
+    Tags         []string      `mapstructure:"image_tags"`
+    Name         string        `mapstructure:"image_name"`
+    Description  string        `mapstructure:"image_description"`
+    Distribution string        `mapstructure:"image_distribution"`
+    ImageRegions []string      `mapstructure:"image_regions"`
+    Timeout      time.Duration `mapstructure:"timeout"`
+
+    ctx interpolate.Context
+}
+
+type PostProcessor struct {
+    config Config
+}
+
+type apiTokenSource struct {
+    AccessToken string
+}
+
+type logger struct {
+    logger *log.Logger
+}
+
+func (t *apiTokenSource) Token() (*oauth2.Token, error) {
+    return &oauth2.Token{
+        AccessToken: t.AccessToken,
+    }, nil
+}
+
+func (l logger) Log(args ...interface{}) {
+    l.logger.Println(args...)
+}
+
+func (p *PostProcessor) Configure(raws ...interface{}) error {
+    err := config.Decode(&p.config, &config.DecodeOpts{
+        Interpolate:        true,
+        InterpolateContext: &p.config.ctx,
+        InterpolateFilter: &interpolate.RenderFilter{
+            Exclude: []string{"space_object_name"},
+        },
+    }, raws...)
+    if err != nil {
+        return err
+    }
+
+    if p.config.SpacesKey == "" {
+        p.config.SpacesKey = os.Getenv("SPACES_ACCESS_KEY")
+    }
+
+    if p.config.SpacesSecret == "" {
+        p.config.SpacesSecret = os.Getenv("SPACES_SECRET_KEY")
+    }
+
+    if p.config.APIToken == "" {
+        p.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
+    }
+
+    if p.config.ObjectName == "" {
+        p.config.ObjectName = "packer-import-{{timestamp}}"
+    }
+
+    if p.config.Distribution == "" {
+        p.config.Distribution = "Unknown"
+    }
+
+    if p.config.Timeout == 0 {
+        p.config.Timeout = 20 * time.Minute
+    }
+
+    errs := new(packer.MultiError)
+
+    if err = interpolate.Validate(p.config.ObjectName, &p.config.ctx); err != nil {
+        errs = packer.MultiErrorAppend(
+            errs, fmt.Errorf("Error parsing space_object_name template: %s", err))
+    }
+
+    templates := map[string]*string{
+        "api_token":     &p.config.APIToken,
+        "spaces_key":    &p.config.SpacesKey,
+        "spaces_secret": &p.config.SpacesSecret,
+        "spaces_region": &p.config.SpacesRegion,
+        "space_name":    &p.config.SpaceName,
+        "image_name":    &p.config.Name,
+        "image_regions": &p.config.ImageRegions[0],
+    }
+    for key, ptr := range templates {
+        if *ptr == "" {
+            errs = packer.MultiErrorAppend(
+                errs, fmt.Errorf("%s must be set", key))
+        }
+    }
+
+    if len(errs.Errors) > 0 {
+        return errs
+    }
+
+    packer.LogSecretFilter.Set(p.config.SpacesKey, p.config.SpacesSecret, p.config.APIToken)
+    log.Println(p.config)
+    return nil
+}
+
+func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
+    var err error
+
+    p.config.ObjectName, err = interpolate.Render(p.config.ObjectName, &p.config.ctx)
+    if err != nil {
+        return nil, false, fmt.Errorf("Error rendering space_object_name template: %s", err)
+    }
+    log.Printf("Rendered space_object_name as %s", p.config.ObjectName)
+
+    log.Println("Looking for image in artifact")
+    source := ""
+    validSuffix := []string{"raw", "img", "qcow2", "vhdx", "vdi", "vmdk", "bz2", "tar.xz", "tar.gz"}
+    for _, path := range artifact.Files() {
+        for _, suffix := range validSuffix {
+            if strings.HasSuffix(path, suffix) {
+                source = path
+                break
+            }
+        }
+        if source != "" {
+            break
+        }
+    }
+    if source == "" {
+        return nil, false, fmt.Errorf("Image file not found")
+    }
+
+    spacesCreds := credentials.NewStaticCredentials(p.config.SpacesKey, p.config.SpacesSecret, "")
+    spacesEndpoint := fmt.Sprintf("https://%s.digitaloceanspaces.com", p.config.SpacesRegion)
+    spacesConfig := &aws.Config{
+        Credentials: spacesCreds,
+        Endpoint:    aws.String(spacesEndpoint),
+        Region:      aws.String(p.config.SpacesRegion),
+        LogLevel:    aws.LogLevel(aws.LogDebugWithSigning),
+        Logger: &logger{
+            logger: log.New(os.Stderr, "", log.LstdFlags),
+        },
+    }
+    sess := session.New(spacesConfig)
+
+    ui.Message(fmt.Sprintf("Uploading %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName))
+    err = uploadImageToSpaces(source, p, sess)
+    if err != nil {
+        return nil, false, err
+    }
+    ui.Message(fmt.Sprintf("Completed upload of %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName))
+
+    client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, &apiTokenSource{
+        AccessToken: p.config.APIToken,
+    }))
+
+    ui.Message(fmt.Sprintf("Started import of spaces://%s/%s", p.config.SpaceName, p.config.ObjectName))
+    image, err := importImageFromSpaces(p, client)
+    if err != nil {
+        return nil, false, err
+    }
+
+    ui.Message(fmt.Sprintf("Waiting for import of image %s to complete (may take a while)", p.config.Name))
+    err = waitUntilImageAvailable(client, image.ID, p.config.Timeout)
+    if err != nil {
+        return nil, false, fmt.Errorf("Import of image %s failed with error: %s", p.config.Name, err)
+    }
+    ui.Message(fmt.Sprintf("Import of image %s complete", p.config.Name))
+
+    if len(p.config.ImageRegions) > 1 {
+        // Remove the first region from the slice as the image is already there.
+        regions := p.config.ImageRegions
+        regions[0] = regions[len(regions)-1]
+        regions[len(regions)-1] = ""
+        regions = regions[:len(regions)-1]
+
+        ui.Message(fmt.Sprintf("Distributing image %s to additional regions: %v", p.config.Name, regions))
+        err = distributeImageToRegions(client, image.ID, regions, p.config.Timeout)
+        if err != nil {
+            return nil, false, err
+        }
+    }
+
+    log.Printf("Adding created image ID %v to output artifacts", image.ID)
+    artifact = &digitalocean.Artifact{
+        SnapshotName: image.Name,
+        SnapshotId:   image.ID,
+        RegionNames:  p.config.ImageRegions,
+        Client:       client,
+    }
+
+    if !p.config.SkipClean {
+        ui.Message(fmt.Sprintf("Deleting import source spaces://%s/%s", p.config.SpaceName, p.config.ObjectName))
+        err = deleteImageFromSpaces(p, sess)
+        if err != nil {
+            return nil, false, err
+        }
+    }
+
+    return artifact, false, nil
+}
+
+func uploadImageToSpaces(source string, p *PostProcessor, s *session.Session) (err error) {
+    file, err := os.Open(source)
+    if err != nil {
+        return fmt.Errorf("Failed to open %s: %s", source, err)
+    }
+
+    uploader := s3manager.NewUploader(s)
+    _, err = uploader.Upload(&s3manager.UploadInput{
+        Body:   file,
+        Bucket: &p.config.SpaceName,
+        Key:    &p.config.ObjectName,
+        ACL:    aws.String("public-read"),
+    })
+    if err != nil {
+        return fmt.Errorf("Failed to upload %s: %s", source, err)
+    }
+
+    file.Close()
+
+    return nil
+}
+
+func importImageFromSpaces(p *PostProcessor, client *godo.Client) (image *godo.Image, err error) {
+    log.Printf("Importing custom image from spaces://%s/%s", p.config.SpaceName, p.config.ObjectName)
+
+    url := fmt.Sprintf("https://%s.%s.digitaloceanspaces.com/%s", p.config.SpaceName, p.config.SpacesRegion, p.config.ObjectName)
+    createRequest := &godo.CustomImageCreateRequest{
+        Name:         p.config.Name,
+        Url:          url,
+        Region:       p.config.ImageRegions[0],
+        Distribution: p.config.Distribution,
+        Description:  p.config.Description,
+        Tags:         p.config.Tags,
+    }
+
+    image, _, err = client.Images.Create(context.TODO(), createRequest)
+    if err != nil {
+        return image, fmt.Errorf("Failed to import from spaces://%s/%s: %s", p.config.SpaceName, p.config.ObjectName, err)
+    }
+
+    return image, nil
+}
+
+func waitUntilImageAvailable(client *godo.Client, imageId int, timeout time.Duration) (err error) {
+    done := make(chan struct{})
+    defer close(done)
+
+    result := make(chan error, 1)
+    go func() {
+        attempts := 0
+        for {
+            attempts += 1
+
+            log.Printf("Waiting for image to become available... (attempt: %d)", attempts)
+            image, _, err := client.Images.GetByID(context.TODO(), imageId)
+            if err != nil {
+                result <- err
+                return
+            }
+
+            if image.Status == "available" {
+                result <- nil
+                return
+            }
+
+            if image.ErrorMessage != "" {
+                result <- fmt.Errorf("%v", image.ErrorMessage)
+                return
+            }
+
+            time.Sleep(3 * time.Second)
+
+            select {
+            case <-done:
+                return
+            default:
+            }
+        }
+    }()
+
+    log.Printf("Waiting for up to %d seconds for image to become available", timeout/time.Second)
+    select {
+    case err := <-result:
+        return err
+    case <-time.After(timeout):
+        err := fmt.Errorf("Timeout while waiting for image to become available")
+        return err
+    }
+}
+
+func distributeImageToRegions(client *godo.Client, imageId int, regions []string, timeout time.Duration) (err error) {
+    for _, region := range regions {
+        transferRequest := &godo.ActionRequest{
+            "type":   "transfer",
+            "region": region,
+        }
+        log.Printf("Transferring image to %s", region)
+        action, _, err := client.ImageActions.Transfer(context.TODO(), imageId, transferRequest)
+        if err != nil {
+            return fmt.Errorf("Error transferring image: %s", err)
+        }
+
+        if err := digitalocean.WaitForImageState(godo.ActionCompleted, imageId, action.ID, client, timeout); err != nil {
+            if err != nil {
+                return fmt.Errorf("Error transferring image: %s", err)
+            }
+        }
+    }
+
+    return nil
+}
+
+func deleteImageFromSpaces(p *PostProcessor, s *session.Session) (err error) {
+    s3conn := s3.New(s)
+    _, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
+        Bucket: &p.config.SpaceName,
+        Key:    &p.config.ObjectName,
+    })
+    if err != nil {
+        return fmt.Errorf("Failed to delete spaces://%s/%s: %s", p.config.SpaceName, p.config.ObjectName, err)
+    }
+
+    return nil
+}
@@ -0,0 +1,32 @@
+package digitaloceanimport
+
+import (
+    "bytes"
+    "testing"
+
+    "github.com/hashicorp/packer/packer"
+)
+
+func testConfig() map[string]interface{} {
+    return map[string]interface{}{}
+}
+
+func testPP(t *testing.T) *PostProcessor {
+    var p PostProcessor
+    if err := p.Configure(testConfig()); err != nil {
+        t.Fatalf("err: %s", err)
+    }
+
+    return &p
+}
+
+func testUi() *packer.BasicUi {
+    return &packer.BasicUi{
+        Reader: new(bytes.Buffer),
+        Writer: new(bytes.Buffer),
+    }
+}
+
+func TestPostProcessor_ImplementsPostProcessor(t *testing.T) {
+    var _ packer.PostProcessor = new(PostProcessor)
+}
@@ -0,0 +1,95 @@
+---
+description: |
+    The Packer DigitalOcean Import post-processor takes an image artifact
+    from various builders and imports it to DigitalOcean.
+layout: docs
+page_title: 'DigitalOcean Import - Post-Processors'
+sidebar_current: 'docs-post-processors-digitalocean-import'
+---
+
+# DigitalOcean Import Post-Processor
+
+Type: `digitalocean-import`
+
+The Packer DigitalOcean Import post-processor takes an image artifact from
+various builders and imports it to DigitalOcean.
+
+## How Does it Work?
+
+The import process works by uploading a temporary copy of the image to
+DigitalOcean Spaces and then importing it as a custom image via the
+DigitalOcean API. The temporary copy in Spaces can be discarded after the
+import is complete.
+
+For information about the requirements to use an image for a DigitalOcean
+Droplet, see DigitalOcean's [Custom Images documentation](https://www.digitalocean.com/docs/images/custom-images/overview/).
+
+## Configuration
+
+There are some configuration options available for the post-processor.
+
+Required:
+
+-   `api_token` (string) - A personal access token used to communicate with
+    the DigitalOcean v2 API.
+
+-   `spaces_key` (string) - The access key used to communicate with Spaces.
+
+-   `spaces_secret` (string) - The secret key used to communicate with Spaces.
+
+-   `spaces_region` (string) - The name of the region, such as `nyc3`, in which
+    to upload the image to Spaces.
+
+-   `space_name` (string) - The name of the specific Space where the image file
+    will be copied to for import. This Space must exist when the
+    post-processor is run.
+
+-   `image_name` (string) - The name to be used for the resulting DigitalOcean
+    custom image.
+
+-   `image_regions` (array of strings) - A list of DigitalOcean regions, such
+    as `nyc3`, where the resulting image will be available for use in creating
+    Droplets.
+
+Optional:
+
+-   `image_description` (string) - The description to set for the resulting
+    imported image.
+
+-   `image_distribution` (string) - The name of the distribution to set for
+    the resulting imported image.
+
+-   `image_tags` (array of strings) - A list of tags to apply to the resulting
+    imported image.
+
+-   `skip_clean` (boolean) - Whether we should skip removing the image file
+    uploaded to Spaces after the import process has completed. "true" means
+    that we should leave it in the Space, "false" means to clean it out.
+    Defaults to `false`.
+
+-   `space_object_name` (string) - The name of the key used in the Space where
+    the image file will be copied to for import. If not specified, this will
+    default to "packer-import-{{timestamp}}".
+
+-   `timeout` (number) - The length of time in minutes to wait for individual
+    steps in the process to successfully complete. This includes both importing
+    the image from Spaces as well as distributing the resulting image to
+    additional regions. If not specified, this will default to 20.
+
+## Basic Example
+
+Here is a basic example:
+
+``` json
+{
+    "type": "digitalocean-import",
+    "api_token": "{{user `token`}}",
+    "spaces_key": "{{user `key`}}",
+    "spaces_secret": "{{user `secret`}}",
+    "spaces_region": "nyc3",
+    "space_name": "import-bucket",
+    "image_name": "ubuntu-18.10-minimal-amd64",
+    "image_description": "Packer import {{timestamp}}",
+    "image_regions": ["nyc3", "nyc2"],
+    "image_tags": ["custom", "packer"]
+}
+```
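For readers skimming the new documentation page, the two-step flow described under "How Does it Work?" corresponds roughly to the condensed sketch below of what the post-processor does internally. The bucket, key, file path, region, and token values are hypothetical and error handling is trimmed; it is an illustration under those assumptions, not the post-processor's exact code.

``` go
package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	// Step 1: upload the image file to a Space via its S3-compatible API.
	sess := session.New(&aws.Config{
		Credentials: credentials.NewStaticCredentials("SPACES_KEY", "SPACES_SECRET", ""),
		Endpoint:    aws.String("https://nyc3.digitaloceanspaces.com"), // hypothetical region
		Region:      aws.String("nyc3"),
	})
	file, err := os.Open("output/image.qcow2") // hypothetical artifact path
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	_, err = s3manager.NewUploader(sess).Upload(&s3manager.UploadInput{
		Body:   file,
		Bucket: aws.String("import-bucket"), // hypothetical Space
		Key:    aws.String("packer-import-example"),
		ACL:    aws.String("public-read"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: ask the DigitalOcean API to import it as a custom image.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))
	image, _, err := client.Images.Create(context.TODO(), &godo.CustomImageCreateRequest{
		Name:   "ubuntu-18.10-minimal-amd64",
		Url:    "https://import-bucket.nyc3.digitaloceanspaces.com/packer-import-example",
		Region: "nyc3",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("import started, image ID %d", image.ID)
}
```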
@@ -260,6 +260,9 @@
                 <li<%= sidebar_current("docs-post-processors-checksum") %>>
                   <a href="/docs/post-processors/checksum.html">Checksum</a>
                 </li>
+                <li<%= sidebar_current("docs-post-processors-digitalocean-import") %>>
+                  <a href="/docs/post-processors/digitalocean-import.html">DigitalOcean Import</a>
+                </li>
                 <li<%= sidebar_current("docs-post-processors-docker-import") %>>
                   <a href="/docs/post-processors/docker-import.html">Docker Import</a>
                 </li>