Add new post processor googlecompute-import

Sean Malloy 2018-03-29 22:50:58 -05:00
parent 2ef4210a98
commit 3622a669dc
16 changed files with 15620 additions and 20 deletions

View File

@@ -56,6 +56,7 @@ import (
	dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save"
	dockertagpostprocessor "github.com/hashicorp/packer/post-processor/docker-tag"
	googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export"
	googlecomputeimportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-import"
	manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest"
	shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local"
	vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant"
@@ -146,6 +147,7 @@ var PostProcessors = map[string]packer.PostProcessor{
	"docker-save":          new(dockersavepostprocessor.PostProcessor),
	"docker-tag":           new(dockertagpostprocessor.PostProcessor),
	"googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor),
	"googlecompute-import": new(googlecomputeimportpostprocessor.PostProcessor),
	"manifest":             new(manifestpostprocessor.PostProcessor),
	"shell-local":          new(shelllocalpostprocessor.PostProcessor),
	"vagrant":              new(vagrantpostprocessor.PostProcessor),

View File

@@ -0,0 +1,3 @@
* Go 1.9.6 => GNU tar format
* Go 1.10.3 with the setHeaderFormat patch => GNU tar format
* Go 1.10.3 without the patch => POSIX (PAX) tar format
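
For context, a minimal sketch (not part of this commit) of how to check which tar format an archive reports. It assumes Go >= 1.10, where `tar.Header` gained the `Format` field; the artifact filename is hypothetical:

``` go
package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := os.Open("disk.raw.tar.gz") // hypothetical compress artifact
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	// Read the first header; Go 1.10+ records the detected tar format.
	hdr, err := tar.NewReader(gz).Next()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hdr.Format) // e.g. GNU or PAX
}
```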

View File

@@ -303,6 +303,9 @@ func createTarArchive(files []string, output io.WriteCloser) error {
			return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
		}

		// workaround for archive format on go >= 1.10
		setHeaderFormat(header)

		if err := archive.WriteHeader(header); err != nil {
			return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
		}

View File

@@ -0,0 +1,9 @@
// +build !go1.10

package compress

import "archive/tar"

func setHeaderFormat(header *tar.Header) {
	// no-op
}

View File

@@ -0,0 +1,11 @@
// +build go1.10

package compress

import "archive/tar"

func setHeaderFormat(header *tar.Header) {
	// We have to set the Format explicitly for the googlecompute-import
	// post-processor. Google Cloud only allows importing GNU tar format.
	header.Format = tar.FormatGNU
}

View File

@@ -0,0 +1,37 @@
package googlecomputeimport

import (
	"fmt"
)

const BuilderId = "packer.post-processor.googlecompute-import"

type Artifact struct {
	paths []string
}

func (*Artifact) BuilderId() string {
	return BuilderId
}

func (*Artifact) Id() string {
	return ""
}

func (a *Artifact) Files() []string {
	pathsCopy := make([]string, len(a.paths))
	copy(pathsCopy, a.paths)
	return pathsCopy
}

func (a *Artifact) String() string {
	return fmt.Sprintf("Exported artifacts in: %s", a.paths)
}

func (*Artifact) State(name string) interface{} {
	return nil
}

func (a *Artifact) Destroy() error {
	return nil
}

View File

@@ -0,0 +1,15 @@
package googlecomputeimport

import (
	"testing"

	"github.com/hashicorp/packer/packer"
)

func TestArtifact_ImplementsArtifact(t *testing.T) {
	var raw interface{}
	raw = &Artifact{}
	if _, ok := raw.(packer.Artifact); !ok {
		t.Fatalf("Artifact should be a packer.Artifact")
	}
}

View File

@@ -0,0 +1,235 @@
package googlecomputeimport

import (
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"

	"google.golang.org/api/compute/v1"
	"google.golang.org/api/storage/v1"

	"github.com/hashicorp/packer/builder/googlecompute"
	"github.com/hashicorp/packer/common"
	"github.com/hashicorp/packer/helper/config"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/post-processor/compress"
	"github.com/hashicorp/packer/template/interpolate"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	Bucket            string            `mapstructure:"bucket"`
	GCSObjectName     string            `mapstructure:"gcs_object_name"`
	ImageDescription  string            `mapstructure:"image_description"`
	ImageFamily       string            `mapstructure:"image_family"`
	ImageLabels       map[string]string `mapstructure:"image_labels"`
	ImageName         string            `mapstructure:"image_name"`
	ProjectId         string            `mapstructure:"project_id"`
	AccountFile       string            `mapstructure:"account_file"`
	KeepOriginalImage bool              `mapstructure:"keep_input_artifact"`

	ctx interpolate.Context
}

type PostProcessor struct {
	config Config
	runner multistep.Runner
}
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"gcs_object_name",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}

	// Set defaults
	if p.config.GCSObjectName == "" {
		p.config.GCSObjectName = "packer-import-{{timestamp}}.tar.gz"
	}

	errs := new(packer.MultiError)

	// Check and render gcs_object_name
	if err = interpolate.Validate(p.config.GCSObjectName, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing gcs_object_name template: %s", err))
	}

	templates := map[string]*string{
		"bucket":       &p.config.Bucket,
		"image_name":   &p.config.ImageName,
		"project_id":   &p.config.ProjectId,
		"account_file": &p.config.AccountFile,
	}
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
	}

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	var err error

	if artifact.BuilderId() != compress.BuilderId {
		err = fmt.Errorf(
			"incompatible artifact type: %s\nCan only import from Compress post-processor artifacts",
			artifact.BuilderId())
		return nil, false, err
	}

	p.config.GCSObjectName, err = interpolate.Render(p.config.GCSObjectName, &p.config.ctx)
	if err != nil {
		return nil, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err)
	}

	rawImageGcsPath, err := UploadToBucket(p.config.AccountFile, ui, artifact, p.config.Bucket, p.config.GCSObjectName)
	if err != nil {
		return nil, p.config.KeepOriginalImage, err
	}

	gceImageArtifact, err := CreateGceImage(p.config.AccountFile, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels)
	if err != nil {
		return nil, p.config.KeepOriginalImage, err
	}

	return gceImageArtifact, p.config.KeepOriginalImage, nil
}
func UploadToBucket(accountFile string, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) {
	var client *http.Client
	var account googlecompute.AccountFile

	err := googlecompute.ProcessAccountFile(&account, accountFile)
	if err != nil {
		return "", err
	}

	var DriverScopes = []string{"https://www.googleapis.com/auth/devstorage.full_control"}
	conf := jwt.Config{
		Email:      account.ClientEmail,
		PrivateKey: []byte(account.PrivateKey),
		Scopes:     DriverScopes,
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}

	client = conf.Client(oauth2.NoContext)

	service, err := storage.New(client)
	if err != nil {
		return "", err
	}

	ui.Say("Looking for tar.gz file in list of artifacts...")
	source := ""
	for _, path := range artifact.Files() {
		ui.Say(fmt.Sprintf("Found artifact %v...", path))
		if strings.HasSuffix(path, ".tar.gz") {
			source = path
			break
		}
	}

	if source == "" {
		return "", fmt.Errorf("No tar.gz file found in list of artifacts")
	}

	artifactFile, err := os.Open(source)
	if err != nil {
		err := fmt.Errorf("error opening %v: %v", source, err)
		return "", err
	}

	ui.Say(fmt.Sprintf("Uploading file %v to GCS bucket %v/%v...", source, bucket, gcsObjectName))
	storageObject, err := service.Objects.Insert(bucket, &storage.Object{Name: gcsObjectName}).Media(artifactFile).Do()
	if err != nil {
		ui.Say(fmt.Sprintf("Failed to upload: %v", storageObject))
		return "", err
	}

	return "https://storage.googleapis.com/" + bucket + "/" + gcsObjectName, nil
}
func CreateGceImage(accountFile string, ui packer.Ui, project string, rawImageURL string, imageName string, imageDescription string, imageFamily string, imageLabels map[string]string) (packer.Artifact, error) {
	var client *http.Client
	var account googlecompute.AccountFile

	err := googlecompute.ProcessAccountFile(&account, accountFile)
	if err != nil {
		return nil, err
	}

	var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
	conf := jwt.Config{
		Email:      account.ClientEmail,
		PrivateKey: []byte(account.PrivateKey),
		Scopes:     DriverScopes,
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}

	client = conf.Client(oauth2.NoContext)

	service, err := compute.New(client)
	if err != nil {
		return nil, err
	}

	gceImage := &compute.Image{
		Name:        imageName,
		Description: imageDescription,
		Family:      imageFamily,
		Labels:      imageLabels,
		RawDisk:     &compute.ImageRawDisk{Source: rawImageURL},
		SourceType:  "RAW",
	}

	ui.Say(fmt.Sprintf("Creating GCE image %v...", imageName))
	op, err := service.Images.Insert(project, gceImage).Do()
	if err != nil {
		ui.Say("Error creating GCE image")
		return nil, err
	}

	ui.Say("Waiting for GCE image creation operation to complete...")
	for op.Status != "DONE" {
		op, err = service.GlobalOperations.Get(project, op.Name).Do()
		if err != nil {
			return nil, err
		}
		time.Sleep(5 * time.Second)
	}

	// fail if image creation operation has an error
	if op.Error != nil {
		var imageError string
		for _, e := range op.Error.Errors {
			imageError += e.Message
		}
		err = fmt.Errorf("failed to create GCE image %s: %s", imageName, imageError)
		return nil, err
	}

	return &Artifact{paths: []string{op.TargetLink}}, nil
}
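
The loop above re-fetches the global operation every five seconds until it reports `DONE`. For illustration only, a sketch of the same polling pattern with an explicit deadline added; the helper name and the 30-minute timeout are hypothetical and not part of this commit (imports as in the file above):

``` go
// waitForImageOperation polls a global operation as CreateGceImage does,
// but gives up after a deadline instead of waiting indefinitely.
func waitForImageOperation(service *compute.Service, project string, op *compute.Operation) error {
	deadline := time.Now().Add(30 * time.Minute) // hypothetical timeout
	var err error
	for op.Status != "DONE" {
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for operation %s", op.Name)
		}
		time.Sleep(5 * time.Second)
		if op, err = service.GlobalOperations.Get(project, op.Name).Do(); err != nil {
			return err
		}
	}
	return nil
}
```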

17 vendor/google.golang.org/api/gensupport/go18.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package gensupport

import (
	"io"
	"net/http"
)

// SetGetBody sets the GetBody field of req to f.
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
	req.GetBody = f
}

View File

@@ -5,12 +5,14 @@
package gensupport

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"net/textproto"
	"strings"

	"google.golang.org/api/googleapi"
)
@@ -174,26 +176,156 @@ func typeHeader(contentType string) textproto.MIMEHeader {
// PrepareUpload determines whether the data in the supplied reader should be
// uploaded in a single request, or in sequential chunks.
// chunkSize is the size of the chunk that media should be split into.
//
// If chunkSize is zero, media is returned as the first value, and the other
// two return values are nil, true.
//
// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
// contents of media fit in a single chunk.
//
// After PrepareUpload has been called, media should no longer be used: the
// media content should be accessed via one of the return values.
func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
	if chunkSize == 0 { // do not chunk
		return media, nil, true
	}
	mb = NewMediaBuffer(media, chunkSize)
	_, _, _, err := mb.Chunk()
	// If err is io.EOF, we can upload this in a single request. Otherwise, err is
	// either nil or a non-EOF error. If it is the latter, then the next call to
	// mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
	// error will be handled at some point.
	return nil, mb, err == io.EOF
}
// MediaInfo holds information for media uploads. It is intended for use by generated
// code only.
type MediaInfo struct {
	// At most one of Media and MediaBuffer will be set.
	media  io.Reader
	buffer *MediaBuffer

	singleChunk bool

	mType           string
	size            int64 // mediaSize, if known. Used only for calls to progressUpdater_.
	progressUpdater googleapi.ProgressUpdater
}

// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
// if needed.
func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
	mi := &MediaInfo{}
	opts := googleapi.ProcessMediaOptions(options)
	if !opts.ForceEmptyContentType {
		r, mi.mType = DetermineContentType(r, opts.ContentType)
	}
	mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
	return mi
}

// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
// call. It returns a MediaInfo using the given reader, size and media type.
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
	rdr := ReaderAtToReader(r, size)
	rdr, mType := DetermineContentType(rdr, mediaType)
	return &MediaInfo{
		size:        size,
		mType:       mType,
		buffer:      NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
		media:       nil,
		singleChunk: false,
	}
}

func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
	if mi != nil {
		mi.progressUpdater = pu
	}
}
// UploadType determines the type of upload: a single request, or a resumable
// series of requests.
func (mi *MediaInfo) UploadType() string {
	if mi.singleChunk {
		return "multipart"
	}
	return "resumable"
}

// UploadRequest sets up an HTTP request for media upload. It adds headers
// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
	cleanup = func() {}
	if mi == nil {
		return body, nil, cleanup
	}
	var media io.Reader
	if mi.media != nil {
		// This only happens when the caller has turned off chunking. In that
		// case, we write all of media in a single non-retryable request.
		media = mi.media
	} else if mi.singleChunk {
		// The data fits in a single chunk, which has now been read into the MediaBuffer.
		// We obtain that chunk so we can write it in a single request. The request can
		// be retried because the data is stored in the MediaBuffer.
		media, _, _, _ = mi.buffer.Chunk()
	}
	if media != nil {
		fb := readerFunc(body)
		fm := readerFunc(media)
		combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
		if fb != nil && fm != nil {
			getBody = func() (io.ReadCloser, error) {
				rb := ioutil.NopCloser(fb())
				rm := ioutil.NopCloser(fm())
				r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType)
				return r, nil
			}
		}
		cleanup = func() { combined.Close() }
		reqHeaders.Set("Content-Type", ctype)
		body = combined
	}
	if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
		reqHeaders.Set("X-Upload-Content-Type", mi.mType)
	}
	return body, getBody, cleanup
}
// readerFunc returns a function that always returns an io.Reader that has the same
// contents as r, provided that can be done without consuming r. Otherwise, it
// returns nil.
// See http.NewRequest (in net/http/request.go).
func readerFunc(r io.Reader) func() io.Reader {
	switch r := r.(type) {
	case *bytes.Buffer:
		buf := r.Bytes()
		return func() io.Reader { return bytes.NewReader(buf) }
	case *bytes.Reader:
		snapshot := *r
		return func() io.Reader { r := snapshot; return &r }
	case *strings.Reader:
		snapshot := *r
		return func() io.Reader { r := snapshot; return &r }
	default:
		return nil
	}
}
// ResumableUpload returns an appropriately configured ResumableUpload value if the
// upload is resumable, or nil otherwise.
func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
	if mi == nil || mi.singleChunk {
		return nil
	}
	return &ResumableUpload{
		URI:       locURI,
		Media:     mi.buffer,
		MediaType: mi.mType,
		Callback: func(curr int64) {
			if mi.progressUpdater != nil {
				mi.progressUpdater(curr, mi.size)
			}
		},
	}
}

14 vendor/google.golang.org/api/gensupport/not_go18.go generated vendored Normal file
View File

@@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package gensupport

import (
	"io"
	"net/http"
)

func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {}

View File

@@ -5,6 +5,8 @@
package gensupport

import (
	"encoding/json"
	"errors"
	"net/http"

	"golang.org/x/net/context"
@@ -32,6 +34,11 @@ func RegisterHook(h Hook) {
// If ctx is non-nil, it calls all hooks, then sends the request with
// ctxhttp.Do, then calls any functions returned by the hooks in reverse order.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	// Disallow Accept-Encoding because it interferes with the automatic gzip handling
	// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
	if _, ok := req.Header["Accept-Encoding"]; ok {
		return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
	}
	if ctx == nil {
		return client.Do(req)
	}
@@ -53,3 +60,12 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*
	}
	return resp, err
}

// DecodeResponse decodes the body of res into target. If there is no body,
// target is unchanged.
func DecodeResponse(target interface{}, res *http.Response) error {
	if res.StatusCode == http.StatusNoContent {
		return nil
	}
	return json.NewDecoder(res.Body).Decode(target)
}

3784 vendor/google.golang.org/api/storage/v1/storage-api.json generated vendored Normal file

File diff suppressed because it is too large.

11171 vendor/google.golang.org/api/storage/v1/storage-gen.go generated vendored Normal file

File diff suppressed because it is too large.

12 vendor/vendor.json vendored
View File

@@ -1568,10 +1568,10 @@
		"revisionTime": "2017-07-07T17:19:04Z"
	},
	{
		"checksumSHA1": "EooPqEpEyY/7NCRwHDMWhhlkQNw=",
		"path": "google.golang.org/api/gensupport",
		"revision": "9c79deebf7496e355d7e95d82d4af1fe4e769b2f",
		"revisionTime": "2018-04-16T00:04:00Z"
	},
	{
		"checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=",
@@ -1583,6 +1583,12 @@
		"path": "google.golang.org/api/googleapi/internal/uritemplates",
		"revision": "ff0a1ff302946b997eb1832381419d1f95143483"
	},
	{
		"checksumSHA1": "Zpu9YB1omKr0VhFb8iycN1u+42Y=",
		"path": "google.golang.org/api/storage/v1",
		"revision": "9c79deebf7496e355d7e95d82d4af1fe4e769b2f",
		"revisionTime": "2018-04-16T00:04:00Z"
	},
	{
		"checksumSHA1": "SSYsrizGeHQRKn/S7j5CQu86egU=",
		"path": "google.golang.org/appengine",

View File

@@ -0,0 +1,145 @@
---
description: |
    The Google Compute Image Import post-processor takes a compressed raw disk
    image and imports it to a GCE image available to Google Compute Engine.
layout: docs
page_title: 'Google Compute Image Import - Post-Processors'
sidebar_current: 'docs-post-processors-googlecompute-import'
---

# Google Compute Image Import Post-Processor

Type: `googlecompute-import`

The Google Compute Image Import post-processor takes a compressed raw disk
image and imports it to a GCE image available to Google Compute Engine.

~> This post-processor is for advanced users. Please ensure you read the
[GCE import documentation](https://cloud.google.com/compute/docs/images/import-existing-image)
before using this post-processor.
## How Does it Work?

The import process operates by uploading a temporary copy of the compressed raw
disk image to a GCS bucket, and calling an import task in GCP on the raw disk
file. Once completed, a GCE image is created containing the converted virtual
machine. The temporary raw disk image copy in GCS can be discarded after the
import is complete.

Google Cloud has very specific requirements for images being imported. Please
see the
[GCE import documentation](https://cloud.google.com/compute/docs/images/import-existing-image)
for details.
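
The post-processor itself leaves the temporary object in place. As a minimal
sketch of how it could be discarded afterwards, assuming an authenticated
`*storage.Service` from `google.golang.org/api/storage/v1` (the bucket and
object names below are hypothetical):

``` go
package cleanup

import (
	"log"

	storage "google.golang.org/api/storage/v1"
)

// deleteTempObject removes the uploaded disk image from GCS once the GCE
// image exists.
func deleteTempObject(service *storage.Service) {
	if err := service.Objects.Delete("my-bucket", "packer-import-1523838000.tar.gz").Do(); err != nil {
		log.Printf("could not delete temporary object: %v", err)
	}
}
```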
## Configuration

### Required

-   `account_file` (string) - The JSON file containing your account credentials.

-   `bucket` (string) - The name of the GCS bucket where the raw disk image
    will be uploaded.

-   `image_name` (string) - The unique name of the resulting image.

-   `project_id` (string) - The project ID where the GCS bucket exists and
    where the GCE image is stored.

### Optional

-   `gcs_object_name` (string) - The name of the GCS object in `bucket` where
    the RAW disk image will be copied for import. Defaults to
    "packer-import-{{timestamp}}.tar.gz".

-   `image_description` (string) - The description of the resulting image.

-   `image_family` (string) - The name of the image family to which the
    resulting image belongs.

-   `image_labels` (object of key/value strings) - Key/value pair labels to
    apply to the created image.

-   `keep_input_artifact` (boolean) - If true, do not delete the compressed RAW
    disk image. Defaults to false.
## Basic Example

Here is a basic example. This assumes that the builder has produced a compressed
raw disk image artifact for us to work with, and that the GCS bucket has been
created.
``` json
{
  "type": "googlecompute-import",
  "account_file": "account.json",
  "project_id": "my-project",
  "bucket": "my-bucket",
  "image_name": "my-gce-image"
}
```
## QEMU Builder Example

Here is a complete example for building a Fedora 28 server GCE image. For this
example, Packer was run from a CentOS 7 server with KVM installed. The CentOS 7
server was running in GCE with the nested hypervisor feature enabled.
```
$ packer build -var serial=$(tty) build.json
```
``` json
{
  "variables": {
    "serial": ""
  },
  "builders": [
    {
      "type": "qemu",
      "accelerator": "kvm",
      "communicator": "none",
      "boot_command": ["<tab> console=ttyS0,115200n8 inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora-28-ks.cfg rd.live.check=0<enter><wait>"],
      "disk_size": "15000",
      "format": "raw",
      "iso_checksum_type": "sha256",
      "iso_checksum": "ea1efdc692356b3346326f82e2f468903e8da59324fdee8b10eac4fea83f23fe",
      "iso_url": "https://download-ib01.fedoraproject.org/pub/fedora/linux/releases/28/Server/x86_64/iso/Fedora-Server-netinst-x86_64-28-1.1.iso",
      "headless": "true",
      "http_directory": "http",
      "http_port_max": "10089",
      "http_port_min": "10082",
      "output_directory": "output",
      "shutdown_timeout": "30m",
      "vm_name": "disk.raw",
      "qemu_binary": "/usr/libexec/qemu-kvm",
      "qemuargs": [
        ["-m", "1024"],
        ["-cpu", "host"],
        ["-chardev", "tty,id=pts,path={{user `serial`}}"],
        ["-device", "isa-serial,chardev=pts"],
        ["-device", "virtio-net,netdev=user.0"]
      ]
    }
  ],
  "post-processors": [
    [
      {
        "type": "compress",
        "output": "output/disk.raw.tar.gz"
      },
      {
        "type": "googlecompute-import",
        "project_id": "my-project",
        "account_file": "account.json",
        "bucket": "my-bucket",
        "image_name": "fedora28-server-{{timestamp}}",
        "image_description": "Fedora 28 Server",
        "image_family": "fedora28-server"
      }
    ]
  ]
}
```